PPC_E500_VECTOR = 0x20000000,
/* PowerPC 4xx dedicated instructions */
PPC_4xx_COMMON = 0x40000000,
+ /* PowerPC 2.03 specification extensions */
+ PPC_203 = 0x80000000,
};
/* CPU run-time flags (MMU and exception model) */
PPC_FLAGS_MMU_403 = 0x00000005,
/* Freescale e500 MMU model */
PPC_FLAGS_MMU_e500 = 0x00000006,
+ /* BookE MMU model */
+ PPC_FLAGS_MMU_BOOKE = 0x00000007,
/* Exception model */
PPC_FLAGS_EXCP_MASK = 0x000000F0,
/* Standard PowerPC exception model */
PPC_FLAGS_EXCP_74xx = 0x00000080,
/* PowerPC 970 exception model */
PPC_FLAGS_EXCP_970 = 0x00000090,
+ /* BookE exception model */
+ PPC_FLAGS_EXCP_BOOKE = 0x000000A0,
};
#define PPC_MMU(env) (env->flags & PPC_FLAGS_MMU_MASK)
/* PowerPC 440 */
#define PPC_INSNS_440 (PPC_INSNS_EMB | PPC_CACHE_OPT | PPC_BOOKE | \
PPC_4xx_COMMON | PPC_405_MAC | PPC_440_SPEC)
-#define PPC_FLAGS_440 (PPC_FLAGS_TODO)
+#define PPC_FLAGS_440 (PPC_FLAGS_MMU_BOOKE | PPC_FLAGS_EXCP_BOOKE)
/* Generic BookE PowerPC */
#define PPC_INSNS_BOOKE (PPC_INSNS_EMB | PPC_BOOKE | PPC_MEM_EIEIO | \
PPC_FLOAT | PPC_FLOAT_OPT | PPC_CACHE_OPT)
-#define PPC_FLAGS_BOOKE (PPC_FLAGS_MMU_SOFT_4xx | PPC_FLAGS_EXCP_40x)
+#define PPC_FLAGS_BOOKE (PPC_FLAGS_MMU_BOOKE | PPC_FLAGS_EXCP_BOOKE)
/* e500 core */
#define PPC_INSNS_E500 (PPC_INSNS_EMB | PPC_BOOKE | PPC_MEM_EIEIO | \
PPC_CACHE_OPT | PPC_E500_VECTOR)
typedef struct ppc_avr_t ppc_avr_t;
typedef struct ppc_tlb_t ppc_tlb_t;
-
/* SPR access micro-ops generations callbacks */
struct ppc_spr_t {
void (*uea_read)(void *opaque, int spr_num);
*/
target_ulong t0, t1, t2;
#endif
+ ppc_avr_t t0_avr, t1_avr, t2_avr;
+
/* general purpose registers */
ppc_gpr_t gpr[32];
/* LR */
/* Altivec registers */
ppc_avr_t avr[32];
uint32_t vscr;
+ /* SPE registers */
+ ppc_gpr_t spe_acc;
+ uint32_t spe_fscr;
/* Internal devices resources */
/* Time base and decrementer */
void do_store_dbatl (CPUPPCState *env, int nr, target_ulong value);
target_ulong do_load_sdr1 (CPUPPCState *env);
void do_store_sdr1 (CPUPPCState *env, target_ulong value);
-target_ulong do_load_asr (CPUPPCState *env);
-void do_store_asr (CPUPPCState *env, target_ulong value);
+#if defined(TARGET_PPC64)
+target_ulong ppc_load_asr (CPUPPCState *env);
+void ppc_store_asr (CPUPPCState *env, target_ulong value);
+#endif
target_ulong do_load_sr (CPUPPCState *env, int srnum);
void do_store_sr (CPUPPCState *env, int srnum, target_ulong value);
#endif
void ppc_store_xer (CPUPPCState *env, uint32_t value);
target_ulong do_load_msr (CPUPPCState *env);
void do_store_msr (CPUPPCState *env, target_ulong value);
+/* Prototype must match the TARGET_PPC64 definition ppc_store_msr_32()
+ * (takes a target_ulong and truncates to 32 bits internally). */
+void ppc_store_msr_32 (CPUPPCState *env, target_ulong value);
void do_compute_hflags (CPUPPCState *env);
void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value);
uint32_t cpu_ppc_load_decr (CPUPPCState *env);
void cpu_ppc_store_decr (CPUPPCState *env, uint32_t value);
+uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env);
+uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env);
+#if !defined(CONFIG_USER_ONLY)
+void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value);
+void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value);
+target_ulong load_40x_pit (CPUPPCState *env);
+void store_40x_pit (CPUPPCState *env, target_ulong val);
+void store_booke_tcr (CPUPPCState *env, target_ulong val);
+void store_booke_tsr (CPUPPCState *env, target_ulong val);
+#endif
#endif
#define TARGET_PAGE_BITS 12
#define T1 (env->t1)
#define T2 (env->t2)
#else
-/* This may be more efficient if HOST_LONG_BITS > TARGET_LONG_BITS
- * To be set to one when we'll be sure it does not cause bugs....
- */
-#if 0
register unsigned long T0 asm(AREG1);
register unsigned long T1 asm(AREG2);
register unsigned long T2 asm(AREG3);
-#else
-register target_ulong T0 asm(AREG1);
-register target_ulong T1 asm(AREG2);
-register target_ulong T2 asm(AREG3);
#endif
+/* We may, sometime, need 64 bits registers on 32 bits target */
+#if defined(TARGET_PPC64) || (HOST_LONG_BITS == 64)
+/* T1_64/T2_64 must alias T1/T2, not T0 (copy-paste fix) */
+#define T0_64 T0
+#define T1_64 T1
+#define T2_64 T2
+#else
+/* no registers can be used */
+#define T0_64 (env->t0)
+#define T1_64 (env->t1)
+#define T2_64 (env->t2)
#endif
+/* Provision for Altivec */
+#define T0_avr (env->t0_avr)
+#define T1_avr (env->t1_avr)
+#define T2_avr (env->t2_avr)
/* XXX: to clean: remove this mess */
#define PARAM(n) ((uint32_t)PARAM##n)
/*****************************************************************************/
/* PowerPC MMU emulation */
-#if defined(CONFIG_USER_ONLY)
+#if defined(CONFIG_USER_ONLY)
int cpu_ppc_handle_mmu_fault (CPUState *env, uint32_t address, int rw,
int is_user, int is_softmmu)
{
int exception, error_code;
-
+
if (rw == 2) {
exception = EXCP_ISI;
error_code = 0;
ppc_tlb_t *tlb;
int nr, best, way;
int ret;
-
+
best = -1;
ret = -1; /* No TLB found */
for (way = 0; way < env->nb_ways; way++) {
if (loglevel > 0) {
fprintf(logfile, "%s\n", __func__);
}
-#endif
+#endif
if ((access_type == ACCESS_CODE && msr_ir == 0) ||
(access_type != ACCESS_CODE && msr_dr == 0)) {
/* No address translation */
__func__, eaddr, ctx->raddr);
}
#endif
-
+
return ret;
}
int exception = 0, error_code = 0;
int access_type;
int ret = 0;
-
+
if (rw == 2) {
/* code access */
rw = 0;
/*****************************************************************************/
/* Special registers manipulation */
+#if defined(TARGET_PPC64)
+target_ulong ppc_load_asr (CPUPPCState *env)
+{
+ return env->asr;
+}
+
+void ppc_store_asr (CPUPPCState *env, target_ulong value)
+{
+ if (env->asr != value) {
+ env->asr = value;
+ tlb_flush(env, 1);
+ }
+}
+#endif
+
target_ulong do_load_sdr1 (CPUPPCState *env)
{
return env->sdr1;
xer_ov = (value >> XER_OV) & 0x01;
xer_ca = (value >> XER_CA) & 0x01;
xer_cmp = (value >> XER_CMP) & 0xFF;
- xer_bc = (value >> XER_BC) & 0x3F;
+ xer_bc = (value >> XER_BC) & 0x7F;
}
/* Swap temporary saved registers with GPRs */
{
return
#if defined (TARGET_PPC64)
- (msr_sf << MSR_SF) |
- (msr_isf << MSR_ISF) |
- (msr_hv << MSR_HV) |
+ ((target_ulong)msr_sf << MSR_SF) |
+ ((target_ulong)msr_isf << MSR_ISF) |
+ ((target_ulong)msr_hv << MSR_HV) |
#endif
- (msr_ucle << MSR_UCLE) |
- (msr_vr << MSR_VR) | /* VR / SPE */
- (msr_ap << MSR_AP) |
- (msr_sa << MSR_SA) |
- (msr_key << MSR_KEY) |
- (msr_pow << MSR_POW) | /* POW / WE */
- (msr_tlb << MSR_TLB) | /* TLB / TGPE / CE */
- (msr_ile << MSR_ILE) |
- (msr_ee << MSR_EE) |
- (msr_pr << MSR_PR) |
- (msr_fp << MSR_FP) |
- (msr_me << MSR_ME) |
- (msr_fe0 << MSR_FE0) |
- (msr_se << MSR_SE) | /* SE / DWE / UBLE */
- (msr_be << MSR_BE) | /* BE / DE */
- (msr_fe1 << MSR_FE1) |
- (msr_al << MSR_AL) |
- (msr_ip << MSR_IP) |
- (msr_ir << MSR_IR) | /* IR / IS */
- (msr_dr << MSR_DR) | /* DR / DS */
- (msr_pe << MSR_PE) | /* PE / EP */
- (msr_px << MSR_PX) | /* PX / PMM */
- (msr_ri << MSR_RI) |
- (msr_le << MSR_LE);
+ ((target_ulong)msr_ucle << MSR_UCLE) |
+ ((target_ulong)msr_vr << MSR_VR) | /* VR / SPE */
+ ((target_ulong)msr_ap << MSR_AP) |
+ ((target_ulong)msr_sa << MSR_SA) |
+ ((target_ulong)msr_key << MSR_KEY) |
+ ((target_ulong)msr_pow << MSR_POW) | /* POW / WE */
+ ((target_ulong)msr_tlb << MSR_TLB) | /* TLB / TGPE / CE */
+ ((target_ulong)msr_ile << MSR_ILE) |
+ ((target_ulong)msr_ee << MSR_EE) |
+ ((target_ulong)msr_pr << MSR_PR) |
+ ((target_ulong)msr_fp << MSR_FP) |
+ ((target_ulong)msr_me << MSR_ME) |
+ ((target_ulong)msr_fe0 << MSR_FE0) |
+ ((target_ulong)msr_se << MSR_SE) | /* SE / DWE / UBLE */
+ ((target_ulong)msr_be << MSR_BE) | /* BE / DE */
+ ((target_ulong)msr_fe1 << MSR_FE1) |
+ ((target_ulong)msr_al << MSR_AL) |
+ ((target_ulong)msr_ip << MSR_IP) |
+ ((target_ulong)msr_ir << MSR_IR) | /* IR / IS */
+ ((target_ulong)msr_dr << MSR_DR) | /* DR / DS */
+ ((target_ulong)msr_pe << MSR_PE) | /* PE / EP */
+ ((target_ulong)msr_px << MSR_PX) | /* PX / PMM */
+ ((target_ulong)msr_ri << MSR_RI) |
+ ((target_ulong)msr_le << MSR_LE);
}
void do_store_msr (CPUPPCState *env, target_ulong value)
enter_pm = 0;
switch (PPC_EXCP(env)) {
+ case PPC_FLAGS_EXCP_603:
+ /* Don't handle SLEEP mode: we should disable all clocks...
+ * No dynamic power-management.
+ */
+ if (msr_pow == 1 && (env->spr[SPR_HID0] & 0x00C00000) != 0)
+ enter_pm = 1;
+ break;
+ case PPC_FLAGS_EXCP_604:
+ if (msr_pow == 1)
+ enter_pm = 1;
+ break;
case PPC_FLAGS_EXCP_7x0:
if (msr_pow == 1 && (env->spr[SPR_HID0] & 0x00E00000) != 0)
enter_pm = 1;
}
}
+#if defined(TARGET_PPC64)
+void ppc_store_msr_32 (CPUPPCState *env, target_ulong value)
+{
+ do_store_msr(env, (uint32_t)value);
+}
+#endif
+
void do_compute_hflags (CPUPPCState *env)
{
/* Compute current hflags */
env->hflags = (msr_pr << MSR_PR) | (msr_le << MSR_LE) |
(msr_fp << MSR_FP) | (msr_fe0 << MSR_FE0) | (msr_fe1 << MSR_FE1) |
- (msr_vr << MSR_VR) | (msr_ap << MSR_AP) | (msr_sa << MSR_SA) |
+ (msr_vr << MSR_VR) | (msr_ap << MSR_AP) | (msr_sa << MSR_SA) |
(msr_se << MSR_SE) | (msr_be << MSR_BE);
#if defined (TARGET_PPC64)
- env->hflags |= (msr_sf << MSR_SF) | (msr_hv << MSR_HV);
+ env->hflags |= (msr_sf << (MSR_SF - 32)) | (msr_hv << (MSR_HV - 32));
#endif
}
#else /* defined (CONFIG_USER_ONLY) */
static void dump_syscall(CPUState *env)
{
- fprintf(logfile, "syscall r0=0x%08x r3=0x%08x r4=0x%08x "
- "r5=0x%08x r6=0x%08x nip=0x%08x\n",
+ fprintf(logfile, "syscall r0=0x" REGX " r3=0x" REGX " r4=0x" REGX
+ " r5=0x" REGX " r6=0x" REGX " nip=0x" REGX "\n",
env->gpr[0], env->gpr[3], env->gpr[4],
env->gpr[5], env->gpr[6], env->nip);
}
/* XXX: this is to be suppressed */
#define regs (env)
-#define Ts0 (int32_t)T0
-#define Ts1 (int32_t)T1
-#define Ts2 (int32_t)T2
#define FT0 (env->ft0)
#define FT1 (env->ft1)
PPC_OP(set_T0)
{
- T0 = PARAM(1);
+ T0 = (uint32_t)PARAM1;
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_set_T0_64 (void)
+{
+ T0 = ((uint64_t)PARAM1 << 32) | (uint64_t)PARAM2;
+ RETURN();
+}
+#endif
+
PPC_OP(set_T1)
{
- T1 = PARAM(1);
+ T1 = (uint32_t)PARAM1;
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO op_set_T1_64 (void)
+{
+ T1 = ((uint64_t)PARAM1 << 32) | (uint64_t)PARAM2;
RETURN();
}
+#endif
#if 0 // unused
PPC_OP(set_T2)
RETURN();
}
+void OPPROTO op_move_T2_T0 (void)
+{
+ T2 = T0;
+ RETURN();
+}
+
/* Generate exceptions */
PPC_OP(raise_exception_err)
{
PPC_OP(update_nip)
{
- env->nip = PARAM(1);
+ env->nip = (uint32_t)PARAM1;
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_update_nip_64 (void)
+{
+ env->nip = ((uint64_t)PARAM1 << 32) | (uint64_t)PARAM2;
+ RETURN();
+}
+#endif
+
PPC_OP(debug)
{
do_raise_exception(EXCP_DEBUG);
}
-
PPC_OP(exit_tb)
{
EXIT_TB();
RETURN();
}
+#if defined (TARGET_PPC64)
+void OPPROTO op_load_asr (void)
+{
+ T0 = env->asr;
+ RETURN();
+}
+
+void OPPROTO op_store_asr (void)
+{
+ ppc_store_asr(env, T0);
+ RETURN();
+}
+#endif
+
PPC_OP(load_msr)
{
T0 = do_load_msr(env);
do_store_msr(env, T0);
RETURN();
}
+
+#if defined (TARGET_PPC64)
+void OPPROTO op_store_msr_32 (void)
+{
+ ppc_store_msr_32(env, T0);
+ RETURN();
+}
+#endif
#endif
/* SPR */
PPC_OP(setcrfbit)
{
- T1 = (T1 & PARAM(1)) | (T0 << PARAM(2));
+ T1 = (T1 & PARAM(1)) | (T0 << PARAM(2));
RETURN();
}
PPC_OP(setlr)
{
- regs->lr = PARAM1;
+ regs->lr = (uint32_t)PARAM1;
RETURN();
}
+#if defined (TARGET_PPC64)
+void OPPROTO op_setlr_64 (void)
+{
+ regs->lr = ((uint64_t)PARAM1 << 32) | (uint64_t)PARAM2;
+ RETURN();
+}
+#endif
+
PPC_OP(goto_tb0)
{
GOTO_TB(op_goto_tb0, PARAM1, 0);
GOTO_TB(op_goto_tb1, PARAM1, 1);
}
-PPC_OP(b_T1)
+void OPPROTO op_b_T1 (void)
{
- regs->nip = T1 & ~3;
+ regs->nip = (uint32_t)(T1 & ~3);
RETURN();
}
+#if defined (TARGET_PPC64)
+void OPPROTO op_b_T1_64 (void)
+{
+ regs->nip = (uint64_t)(T1 & ~3);
+ RETURN();
+}
+#endif
+
PPC_OP(jz_T0)
{
if (!T0)
RETURN();
}
-PPC_OP(btest_T1)
+void OPPROTO op_btest_T1 (void)
{
if (T0) {
- regs->nip = T1 & ~3;
+ regs->nip = (uint32_t)(T1 & ~3);
} else {
- regs->nip = PARAM1;
+ regs->nip = (uint32_t)PARAM1;
}
RETURN();
}
+#if defined (TARGET_PPC64)
+void OPPROTO op_btest_T1_64 (void)
+{
+ if (T0) {
+ regs->nip = (uint64_t)(T1 & ~3);
+ } else {
+ regs->nip = ((uint64_t)PARAM1 << 32) | (uint64_t)PARAM2;
+ }
+ RETURN();
+}
+#endif
+
PPC_OP(movl_T1_ctr)
{
T1 = regs->ctr;
}
/* tests with result in T0 */
+void OPPROTO op_test_ctr (void)
+{
+ T0 = (uint32_t)regs->ctr;
+ RETURN();
+}
-PPC_OP(test_ctr)
+#if defined(TARGET_PPC64)
+void OPPROTO op_test_ctr_64 (void)
{
- T0 = regs->ctr;
+ T0 = (uint64_t)regs->ctr;
+ RETURN();
+}
+#endif
+
+void OPPROTO op_test_ctr_true (void)
+{
+ T0 = ((uint32_t)regs->ctr != 0 && (T0 & PARAM1) != 0);
RETURN();
}
-PPC_OP(test_ctr_true)
+#if defined(TARGET_PPC64)
+void OPPROTO op_test_ctr_true_64 (void)
{
- T0 = (regs->ctr != 0 && (T0 & PARAM(1)) != 0);
+ T0 = ((uint64_t)regs->ctr != 0 && (T0 & PARAM1) != 0);
RETURN();
}
+#endif
-PPC_OP(test_ctr_false)
+void OPPROTO op_test_ctr_false (void)
{
- T0 = (regs->ctr != 0 && (T0 & PARAM(1)) == 0);
+ T0 = ((uint32_t)regs->ctr != 0 && (T0 & PARAM1) == 0);
RETURN();
}
-PPC_OP(test_ctrz)
+#if defined(TARGET_PPC64)
+void OPPROTO op_test_ctr_false_64 (void)
{
- T0 = (regs->ctr == 0);
+ T0 = ((uint64_t)regs->ctr != 0 && (T0 & PARAM1) == 0);
RETURN();
}
+#endif
+
+void OPPROTO op_test_ctrz (void)
+{
+ T0 = ((uint32_t)regs->ctr == 0);
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO op_test_ctrz_64 (void)
+{
+ T0 = ((uint64_t)regs->ctr == 0);
+ RETURN();
+}
+#endif
+
+void OPPROTO op_test_ctrz_true (void)
+{
+ T0 = ((uint32_t)regs->ctr == 0 && (T0 & PARAM1) != 0);
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO op_test_ctrz_true_64 (void)
+{
+ T0 = ((uint64_t)regs->ctr == 0 && (T0 & PARAM1) != 0);
+ RETURN();
+}
+#endif
-PPC_OP(test_ctrz_true)
+void OPPROTO op_test_ctrz_false (void)
{
- T0 = (regs->ctr == 0 && (T0 & PARAM(1)) != 0);
+ T0 = ((uint32_t)regs->ctr == 0 && (T0 & PARAM1) == 0);
RETURN();
}
-PPC_OP(test_ctrz_false)
+#if defined(TARGET_PPC64)
+void OPPROTO op_test_ctrz_false_64 (void)
{
- T0 = (regs->ctr == 0 && (T0 & PARAM(1)) == 0);
+ T0 = ((uint64_t)regs->ctr == 0 && (T0 & PARAM1) == 0);
RETURN();
}
+#endif
PPC_OP(test_true)
{
RETURN();
}
-void OPPROTO op_addo (void)
+/* Set XER overflow bits after a 32-bit add: overflow occurs when both
+ * operands (T2 = original T0, T1) have the same sign and the result (T0)
+ * has the opposite sign. */
+void OPPROTO op_check_addo (void)
{
-    do_addo();
-    RETURN();
+    if (likely(!(((uint32_t)T2 ^ (uint32_t)T1 ^ UINT32_MAX) &
+                 ((uint32_t)T2 ^ (uint32_t)T0) & (1UL << 31)))) {
+        xer_ov = 0;
+    } else {
+        xer_so = 1;
+        xer_ov = 1;
+    }
+    /* RETURN() is required so dyngen can delimit this micro-op */
+    RETURN();
}
-/* add carrying */
-PPC_OP(addc)
+#if defined(TARGET_PPC64)
+/* 64-bit variant of op_check_addo: same sign-based overflow test on the
+ * full 64-bit values. */
+void OPPROTO op_check_addo_64 (void)
{
-    T2 = T0;
-    T0 += T1;
-    if (T0 < T2) {
-        xer_ca = 1;
+    /* 1ULL, not 1UL: shifting a 32-bit unsigned long by 63 is undefined
+     * behavior on 32-bit hosts (cf. op_check_subfo_64, which uses 1ULL) */
+    if (likely(!(((uint64_t)T2 ^ (uint64_t)T1 ^ UINT64_MAX) &
+                 ((uint64_t)T2 ^ (uint64_t)T0) & (1ULL << 63)))) {
+        xer_ov = 0;
    } else {
+        xer_so = 1;
+        xer_ov = 1;
+    }
+    RETURN();
+}
+#endif
+
+/* add carrying */
+void OPPROTO op_check_addc (void)
+{
+ if (likely((uint32_t)T0 >= (uint32_t)T2)) {
xer_ca = 0;
+ } else {
+ xer_ca = 1;
}
RETURN();
}
-void OPPROTO op_addco (void)
+#if defined(TARGET_PPC64)
+void OPPROTO op_check_addc_64 (void)
{
- do_addco();
+ if (likely((uint64_t)T0 >= (uint64_t)T2)) {
+ xer_ca = 0;
+ } else {
+ xer_ca = 1;
+ }
RETURN();
}
+#endif
/* add extended */
void OPPROTO op_adde (void)
RETURN();
}
-PPC_OP(addeo)
+#if defined(TARGET_PPC64)
+void OPPROTO op_adde_64 (void)
{
- do_addeo();
+ do_adde_64();
RETURN();
}
+#endif
/* add immediate */
PPC_OP(addi)
RETURN();
}
-/* add immediate carrying */
-PPC_OP(addic)
+/* add to minus one extended */
+void OPPROTO op_add_me (void)
{
- T1 = T0;
- T0 += PARAM(1);
- if (T0 < T1) {
+ T0 += xer_ca + (-1);
+ if (likely((uint32_t)T1 != 0))
xer_ca = 1;
- } else {
- xer_ca = 0;
- }
RETURN();
}
-/* add to minus one extended */
-PPC_OP(addme)
+#if defined(TARGET_PPC64)
+void OPPROTO op_add_me_64 (void)
{
- T1 = T0;
T0 += xer_ca + (-1);
- if (T1 != 0)
+ if (likely((uint64_t)T1 != 0))
xer_ca = 1;
RETURN();
}
+#endif
void OPPROTO op_addmeo (void)
{
RETURN();
}
+void OPPROTO op_addmeo_64 (void)
+{
+ do_addmeo();
+ RETURN();
+}
+
/* add to zero extended */
-PPC_OP(addze)
+void OPPROTO op_add_ze (void)
{
- T1 = T0;
T0 += xer_ca;
- if (T0 < T1) {
- xer_ca = 1;
- } else {
- xer_ca = 0;
- }
RETURN();
}
-void OPPROTO op_addzeo (void)
+/* divide word */
+void OPPROTO op_divw (void)
{
- do_addzeo();
+ if (unlikely(((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) ||
+ (int32_t)T1 == 0)) {
+ T0 = (int32_t)((-1) * ((uint32_t)T0 >> 31));
+ } else {
+ T0 = (int32_t)T0 / (int32_t)T1;
+ }
RETURN();
}
-/* divide word */
-PPC_OP(divw)
+#if defined(TARGET_PPC64)
+void OPPROTO op_divd (void)
{
- if ((Ts0 == INT32_MIN && Ts1 == -1) || Ts1 == 0) {
- T0 = (int32_t)((-1) * (T0 >> 31));
+ if (unlikely(((int64_t)T0 == INT64_MIN && (int64_t)T1 == -1) ||
+ (int64_t)T1 == 0)) {
+ T0 = (int64_t)((-1ULL) * ((uint64_t)T0 >> 63));
} else {
- T0 = (Ts0 / Ts1);
+ T0 = (int64_t)T0 / (int64_t)T1;
}
RETURN();
}
+#endif
void OPPROTO op_divwo (void)
{
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_divdo (void)
+{
+ do_divdo();
+ RETURN();
+}
+#endif
+
/* divide word unsigned */
-PPC_OP(divwu)
+void OPPROTO op_divwu (void)
+{
+ if (unlikely(T1 == 0)) {
+ T0 = 0;
+ } else {
+ T0 = (uint32_t)T0 / (uint32_t)T1;
+ }
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO op_divdu (void)
{
- if (T1 == 0) {
+ if (unlikely(T1 == 0)) {
T0 = 0;
} else {
T0 /= T1;
}
RETURN();
}
+#endif
void OPPROTO op_divwuo (void)
{
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_divduo (void)
+{
+ do_divduo();
+ RETURN();
+}
+#endif
+
/* multiply high word */
-PPC_OP(mulhw)
+void OPPROTO op_mulhw (void)
{
- T0 = ((int64_t)Ts0 * (int64_t)Ts1) >> 32;
+ T0 = ((int64_t)((int32_t)T0) * (int64_t)((int32_t)T1)) >> 32;
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_mulhd (void)
+{
+ uint64_t tl, th;
+
+ do_imul64(&tl, &th);
+ T0 = th;
+ RETURN();
+}
+#endif
+
/* multiply high word unsigned */
-PPC_OP(mulhwu)
+void OPPROTO op_mulhwu (void)
{
- T0 = ((uint64_t)T0 * (uint64_t)T1) >> 32;
+ T0 = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1) >> 32;
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_mulhdu (void)
+{
+ uint64_t tl, th;
+
+ do_mul64(&tl, &th);
+ T0 = th;
+ RETURN();
+}
+#endif
+
/* multiply low immediate */
PPC_OP(mulli)
{
- T0 = (Ts0 * SPARAM(1));
+ T0 = ((int32_t)T0 * (int32_t)PARAM1);
RETURN();
}
/* multiply low word */
PPC_OP(mullw)
+{
+ T0 = (int32_t)(T0 * T1);
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO op_mulld (void)
{
T0 *= T1;
RETURN();
}
+#endif
void OPPROTO op_mullwo (void)
{
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_mulldo (void)
+{
+ do_mulldo();
+ RETURN();
+}
+#endif
+
/* negate */
-PPC_OP(neg)
+void OPPROTO op_neg (void)
{
- if (T0 != 0x80000000) {
- T0 = -Ts0;
+ if (likely(T0 != INT32_MIN)) {
+ T0 = -(int32_t)T0;
}
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_neg_64 (void)
+{
+ if (likely(T0 != INT64_MIN)) {
+ T0 = -(int64_t)T0;
+ }
+ RETURN();
+}
+#endif
+
void OPPROTO op_nego (void)
{
do_nego();
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_nego_64 (void)
+{
+ do_nego_64();
+ RETURN();
+}
+#endif
+
/* substract from */
PPC_OP(subf)
{
RETURN();
}
-void OPPROTO op_subfo (void)
+void OPPROTO op_check_subfo (void)
+{
+ if (likely(!(((uint32_t)(~T2) ^ (uint32_t)T1 ^ UINT32_MAX) &
+ ((uint32_t)(~T2) ^ (uint32_t)T0) & (1UL << 31)))) {
+ xer_ov = 0;
+ } else {
+ xer_so = 1;
+ xer_ov = 1;
+ }
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO op_check_subfo_64 (void)
{
- do_subfo();
+ if (likely(!(((uint64_t)(~T2) ^ (uint64_t)T1 ^ UINT64_MAX) &
+ ((uint64_t)(~T2) ^ (uint64_t)T0) & (1ULL << 63)))) {
+ xer_ov = 0;
+ } else {
+ xer_so = 1;
+ xer_ov = 1;
+ }
RETURN();
}
+#endif
/* substract from carrying */
-PPC_OP(subfc)
+void OPPROTO op_check_subfc (void)
{
- T0 = T1 - T0;
- if (T0 <= T1) {
- xer_ca = 1;
- } else {
+ if (likely((uint32_t)T0 > (uint32_t)T1)) {
xer_ca = 0;
+ } else {
+ xer_ca = 1;
}
RETURN();
}
-void OPPROTO op_subfco (void)
+#if defined(TARGET_PPC64)
+void OPPROTO op_check_subfc_64 (void)
{
- do_subfco();
+ if (likely((uint64_t)T0 > (uint64_t)T1)) {
+ xer_ca = 0;
+ } else {
+ xer_ca = 1;
+ }
RETURN();
}
+#endif
/* substract from extended */
void OPPROTO op_subfe (void)
RETURN();
}
-PPC_OP(subfeo)
+#if defined(TARGET_PPC64)
+void OPPROTO op_subfe_64 (void)
{
- do_subfeo();
+ do_subfe_64();
RETURN();
}
+#endif
/* substract from immediate carrying */
-PPC_OP(subfic)
+void OPPROTO op_subfic (void)
{
- T0 = PARAM(1) + ~T0 + 1;
- if (T0 <= PARAM(1)) {
+ T0 = PARAM1 + ~T0 + 1;
+ if ((uint32_t)T0 <= (uint32_t)PARAM1) {
xer_ca = 1;
} else {
xer_ca = 0;
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_subfic_64 (void)
+{
+ T0 = PARAM1 + ~T0 + 1;
+ if ((uint64_t)T0 <= (uint64_t)PARAM1) {
+ xer_ca = 1;
+ } else {
+ xer_ca = 0;
+ }
+ RETURN();
+}
+#endif
+
/* substract from minus one extended */
-PPC_OP(subfme)
+void OPPROTO op_subfme (void)
{
T0 = ~T0 + xer_ca - 1;
+ if (likely((uint32_t)T0 != (uint32_t)-1))
+ xer_ca = 1;
+ RETURN();
+}
- if (T0 != -1)
+#if defined(TARGET_PPC64)
+void OPPROTO op_subfme_64 (void)
+{
+ T0 = ~T0 + xer_ca - 1;
+ if (likely((uint64_t)T0 != (uint64_t)-1))
xer_ca = 1;
RETURN();
}
+#endif
void OPPROTO op_subfmeo (void)
{
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_subfmeo_64 (void)
+{
+ do_subfmeo_64();
+ RETURN();
+}
+#endif
+
/* substract from zero extended */
-PPC_OP(subfze)
+void OPPROTO op_subfze (void)
{
T1 = ~T0;
T0 = T1 + xer_ca;
- if (T0 < T1) {
+ if ((uint32_t)T0 < (uint32_t)T1) {
xer_ca = 1;
} else {
xer_ca = 0;
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_subfze_64 (void)
+{
+ T1 = ~T0;
+ T0 = T1 + xer_ca;
+ if ((uint64_t)T0 < (uint64_t)T1) {
+ xer_ca = 1;
+ } else {
+ xer_ca = 0;
+ }
+ RETURN();
+}
+#endif
+
void OPPROTO op_subfzeo (void)
{
do_subfzeo();
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_subfzeo_64 (void)
+{
+ do_subfzeo_64();
+ RETURN();
+}
+#endif
+
/*** Integer comparison ***/
/* compare */
-PPC_OP(cmp)
+void OPPROTO op_cmp (void)
+{
+ if ((int32_t)T0 < (int32_t)T1) {
+ T0 = 0x08;
+ } else if ((int32_t)T0 > (int32_t)T1) {
+ T0 = 0x04;
+ } else {
+ T0 = 0x02;
+ }
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO op_cmp_64 (void)
{
- if (Ts0 < Ts1) {
+ if ((int64_t)T0 < (int64_t)T1) {
T0 = 0x08;
- } else if (Ts0 > Ts1) {
+ } else if ((int64_t)T0 > (int64_t)T1) {
T0 = 0x04;
} else {
T0 = 0x02;
}
RETURN();
}
+#endif
/* compare immediate */
-PPC_OP(cmpi)
+void OPPROTO op_cmpi (void)
{
- if (Ts0 < SPARAM(1)) {
+ if ((int32_t)T0 < (int32_t)PARAM1) {
T0 = 0x08;
- } else if (Ts0 > SPARAM(1)) {
+ } else if ((int32_t)T0 > (int32_t)PARAM1) {
T0 = 0x04;
} else {
T0 = 0x02;
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_cmpi_64 (void)
+{
+ if ((int64_t)T0 < (int64_t)((int32_t)PARAM1)) {
+ T0 = 0x08;
+ } else if ((int64_t)T0 > (int64_t)((int32_t)PARAM1)) {
+ T0 = 0x04;
+ } else {
+ T0 = 0x02;
+ }
+ RETURN();
+}
+#endif
+
/* compare logical */
-PPC_OP(cmpl)
+void OPPROTO op_cmpl (void)
{
- if (T0 < T1) {
+ if ((uint32_t)T0 < (uint32_t)T1) {
T0 = 0x08;
- } else if (T0 > T1) {
+ } else if ((uint32_t)T0 > (uint32_t)T1) {
T0 = 0x04;
} else {
T0 = 0x02;
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_cmpl_64 (void)
+{
+ if ((uint64_t)T0 < (uint64_t)T1) {
+ T0 = 0x08;
+ } else if ((uint64_t)T0 > (uint64_t)T1) {
+ T0 = 0x04;
+ } else {
+ T0 = 0x02;
+ }
+ RETURN();
+}
+#endif
+
/* compare logical immediate */
-PPC_OP(cmpli)
+void OPPROTO op_cmpli (void)
+{
+ if ((uint32_t)T0 < (uint32_t)PARAM1) {
+ T0 = 0x08;
+ } else if ((uint32_t)T0 > (uint32_t)PARAM1) {
+ T0 = 0x04;
+ } else {
+ T0 = 0x02;
+ }
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO op_cmpli_64 (void)
{
- if (T0 < PARAM(1)) {
+ if ((uint64_t)T0 < (uint64_t)PARAM1) {
T0 = 0x08;
- } else if (T0 > PARAM(1)) {
+ } else if ((uint64_t)T0 > (uint64_t)PARAM1) {
T0 = 0x04;
} else {
T0 = 0x02;
}
RETURN();
}
+#endif
+
+void OPPROTO op_isel (void)
+{
+ if (T0)
+ T0 = T1;
+ else
+ T0 = T2;
+ RETURN();
+}
+
+void OPPROTO op_popcntb (void)
+{
+ do_popcntb();
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO op_popcntb_64 (void)
+{
+ do_popcntb_64();
+ RETURN();
+}
+#endif
/*** Integer logical ***/
/* and */
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_cntlzd (void)
+{
+#if HOST_LONG_BITS == 64
+ int cnt;
+
+ cnt = 0;
+ if (!(T0 & 0xFFFFFFFF00000000ULL)) {
+ cnt += 32;
+ T0 <<= 32;
+ }
+ if (!(T0 & 0xFFFF000000000000ULL)) {
+ cnt += 16;
+ T0 <<= 16;
+ }
+ if (!(T0 & 0xFF00000000000000ULL)) {
+ cnt += 8;
+ T0 <<= 8;
+ }
+ if (!(T0 & 0xF000000000000000ULL)) {
+ cnt += 4;
+ T0 <<= 4;
+ }
+ if (!(T0 & 0xC000000000000000ULL)) {
+ cnt += 2;
+ T0 <<= 2;
+ }
+ if (!(T0 & 0x8000000000000000ULL)) {
+ cnt++;
+ T0 <<= 1;
+ }
+ if (!(T0 & 0x8000000000000000ULL)) {
+ cnt++;
+ }
+ T0 = cnt;
+#else
+ uint32_t tmp;
+
+ /* Make it easier on 32 bits host machines */
+ if (!(T0 >> 32)) {
+ tmp = T0;
+ T0 = 32;
+ } else {
+ tmp = T0 >> 32;
+ T0 = 0;
+ }
+ if (!(tmp & 0xFFFF0000UL)) {
+ T0 += 16;
+ tmp <<= 16;
+ }
+ if (!(tmp & 0xFF000000UL)) {
+ T0 += 8;
+ tmp <<= 8;
+ }
+ if (!(tmp & 0xF0000000UL)) {
+ T0 += 4;
+ tmp <<= 4;
+ }
+ if (!(tmp & 0xC0000000UL)) {
+ T0 += 2;
+ tmp <<= 2;
+ }
+ if (!(tmp & 0x80000000UL)) {
+ T0++;
+ tmp <<= 1;
+ }
+ if (!(tmp & 0x80000000UL)) {
+ T0++;
+ }
+#endif
+ RETURN();
+}
+#endif
+
/* eqv */
PPC_OP(eqv)
{
}
/* extend sign byte */
-PPC_OP(extsb)
+void OPPROTO op_extsb (void)
{
- T0 = (int32_t)((int8_t)(Ts0));
+#if defined (TARGET_PPC64)
+ T0 = (int64_t)((int8_t)T0);
+#else
+ T0 = (int32_t)((int8_t)T0);
+#endif
RETURN();
}
/* extend sign half word */
-PPC_OP(extsh)
+void OPPROTO op_extsh (void)
{
- T0 = (int32_t)((int16_t)(Ts0));
+#if defined (TARGET_PPC64)
+ T0 = (int64_t)((int16_t)T0);
+#else
+ T0 = (int32_t)((int16_t)T0);
+#endif
RETURN();
}
+#if defined (TARGET_PPC64)
+void OPPROTO op_extsw (void)
+{
+ T0 = (int64_t)((int32_t)T0);
+ RETURN();
+}
+#endif
+
/* nand */
PPC_OP(nand)
{
/*** Integer shift ***/
/* shift left word */
-PPC_OP(slw)
+void OPPROTO op_slw (void)
{
if (T1 & 0x20) {
T0 = 0;
+ } else {
+ T0 = (uint32_t)(T0 << T1);
+ }
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO op_sld (void)
+{
+ if (T1 & 0x40) {
+ T0 = 0;
} else {
T0 = T0 << T1;
}
RETURN();
}
+#endif
/* shift right algebraic word */
void OPPROTO op_sraw (void)
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_srad (void)
+{
+ do_srad();
+ RETURN();
+}
+#endif
+
/* shift right algebraic word immediate */
-PPC_OP(srawi)
+void OPPROTO op_srawi (void)
{
- T1 = T0;
- T0 = (Ts0 >> PARAM(1));
- if (Ts1 < 0 && (Ts1 & PARAM(2)) != 0) {
+ uint32_t mask = (uint32_t)PARAM2;
+
+ T0 = (int32_t)T0 >> PARAM1;
+ if ((int32_t)T1 < 0 && (T1 & mask) != 0) {
xer_ca = 1;
} else {
xer_ca = 0;
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_sradi (void)
+{
+ uint64_t mask = ((uint64_t)PARAM2 << 32) | (uint64_t)PARAM3;
+
+ T0 = (int64_t)T0 >> PARAM1;
+ if ((int64_t)T1 < 0 && ((uint64_t)T1 & mask) != 0) {
+ xer_ca = 1;
+ } else {
+ xer_ca = 0;
+ }
+ RETURN();
+}
+#endif
+
/* shift right word */
-PPC_OP(srw)
+void OPPROTO op_srw (void)
{
if (T1 & 0x20) {
T0 = 0;
} else {
- T0 = T0 >> T1;
+ T0 = (uint32_t)T0 >> T1;
+ }
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO op_srd (void)
+{
+ if (T1 & 0x40) {
+ T0 = 0;
+ } else {
+ T0 = (uint64_t)T0 >> T1;
}
RETURN();
}
+#endif
void OPPROTO op_sl_T0_T1 (void)
{
void OPPROTO op_srl_T0_T1 (void)
{
- T0 = T0 >> T1;
+ T0 = (uint32_t)T0 >> T1;
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+/* 64-bit logical shift right: must operate on the full 64-bit value;
+ * the (uint32_t) cast here truncated the upper half (cf. op_srli_T0_64,
+ * which correctly casts to uint64_t). */
+void OPPROTO op_srl_T0_T1_64 (void)
+{
+    T0 = (uint64_t)T0 >> T1;
+    RETURN();
+}
+#endif
void OPPROTO op_srli_T0 (void)
{
- T0 = T0 >> PARAM1;
+ T0 = (uint32_t)T0 >> PARAM1;
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_srli_T0_64 (void)
+{
+ T0 = (uint64_t)T0 >> PARAM1;
+ RETURN();
+}
+#endif
+
void OPPROTO op_srli_T1 (void)
{
- T1 = T1 >> PARAM1;
+ T1 = (uint32_t)T1 >> PARAM1;
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_srli_T1_64 (void)
+{
+ T1 = (uint64_t)T1 >> PARAM1;
+ RETURN();
+}
+#endif
+
/*** Floating-Point arithmetic ***/
/* fadd - fadd. */
PPC_OP(fadd)
#endif
/* Special op to check and maybe clear reservation */
-PPC_OP(check_reservation)
+void OPPROTO op_check_reservation (void)
{
if ((uint32_t)env->reserve == (uint32_t)(T0 & ~0x00000003))
env->reserve = -1;
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_check_reservation_64 (void)
+{
+ if ((uint64_t)env->reserve == (uint64_t)(T0 & ~0x00000003))
+ env->reserve = -1;
+ RETURN();
+}
+#endif
+
/* Return from interrupt */
#if !defined(CONFIG_USER_ONLY)
void OPPROTO op_rfi (void)
do_rfi();
RETURN();
}
+
+#if defined(TARGET_PPC64)
+void OPPROTO op_rfi_32 (void)
+{
+ do_rfi_32();
+ RETURN();
+}
+#endif
#endif
/* Trap word */
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_td (void)
+{
+ do_td(PARAM1);
+ RETURN();
+}
+#endif
+
/* Instruction cache block invalidate */
-PPC_OP(icbi)
+void OPPROTO op_icbi (void)
{
do_icbi();
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO op_icbi_64 (void)
+{
+ do_icbi_64();
+ RETURN();
+}
+#endif
+
#if !defined(CONFIG_USER_ONLY)
/* tlbia */
PPC_OP(tlbia)
}
/* tlbie */
-PPC_OP(tlbie)
+void OPPROTO op_tlbie (void)
{
do_tlbie();
RETURN();
}
+
+#if defined(TARGET_PPC64)
+void OPPROTO op_tlbie_64 (void)
+{
+ do_tlbie_64();
+ RETURN();
+}
+#endif
+
+#if defined(TARGET_PPC64)
+void OPPROTO op_slbia (void)
+{
+ do_slbia();
+ RETURN();
+}
+
+void OPPROTO op_slbie (void)
+{
+ do_slbie();
+ RETURN();
+}
+#endif
#endif
/* PowerPC 602/603/755 software TLB load instructions */
#endif
/* 601 specific */
-uint32_t cpu_ppc601_load_rtcl (CPUState *env);
void OPPROTO op_load_601_rtcl (void)
{
T0 = cpu_ppc601_load_rtcl(env);
RETURN();
}
-uint32_t cpu_ppc601_load_rtcu (CPUState *env);
void OPPROTO op_load_601_rtcu (void)
{
T0 = cpu_ppc601_load_rtcu(env);
}
#if !defined(CONFIG_USER_ONLY)
-void cpu_ppc601_store_rtcl (CPUState *env, uint32_t value);
void OPPROTO op_store_601_rtcl (void)
{
cpu_ppc601_store_rtcl(env, T0);
RETURN();
}
-void cpu_ppc601_store_rtcu (CPUState *env, uint32_t value);
void OPPROTO op_store_601_rtcu (void)
{
cpu_ppc601_store_rtcu(env, T0);
void OPPROTO op_POWER_doz (void)
{
- if (Ts1 > Ts0)
+ if ((int32_t)T1 > (int32_t)T0)
T0 = T1 - T0;
else
T0 = 0;
if (T1 & 0x20UL)
T0 = -1L;
else
- T0 = Ts0 >> T1;
+ T0 = (int32_t)T0 >> T1;
RETURN();
}
{
T1 &= 0x1FUL;
env->spr[SPR_MQ] = rotl32(T0, 32 - T1);
- T0 = Ts0 >> T1;
+ T0 = (int32_t)T0 >> T1;
RETURN();
}
{
T1 &= 0x1FUL;
env->spr[SPR_MQ] = T0 >> T1;
- T0 = Ts0 >> T1;
+ T0 = (int32_t)T0 >> T1;
RETURN();
}
RETURN();
}
-target_ulong load_40x_pit (CPUState *env);
void OPPROTO op_load_40x_pit (void)
{
T0 = load_40x_pit(env);
RETURN();
}
-void store_40x_pit (CPUState *env, target_ulong val);
void OPPROTO op_store_40x_pit (void)
{
store_40x_pit(env, T0);
RETURN();
}
-void store_booke_tcr (CPUState *env, target_ulong val);
void OPPROTO op_store_booke_tcr (void)
{
store_booke_tcr(env, T0);
RETURN();
}
-void store_booke_tsr (CPUState *env, target_ulong val);
void OPPROTO op_store_booke_tsr (void)
{
store_booke_tsr(env, T0);
//#define DEBUG_SOFTWARE_TLB
//#define FLUSH_ALL_TLBS
-#define Ts0 (long)((target_long)T0)
-#define Ts1 (long)((target_long)T1)
-#define Ts2 (long)((target_long)T2)
-
/*****************************************************************************/
/* Exceptions processing helpers */
void cpu_loop_exit (void)
xer_ov = (T0 >> XER_OV) & 0x01;
xer_ca = (T0 >> XER_CA) & 0x01;
xer_cmp = (T0 >> XER_CMP) & 0xFF;
- xer_bc = (T0 >> XER_BC) & 0x3F;
+ xer_bc = (T0 >> XER_BC) & 0x7F;
}
void do_load_fpscr (void)
} u;
int i;
-#ifdef WORDS_BIGENDIAN
+#if defined(WORDS_BIGENDIAN)
#define WORD0 0
#define WORD1 1
#else
/*****************************************************************************/
/* Fixed point operations helpers */
-void do_addo (void)
+#if defined(TARGET_PPC64)
+static void add128 (uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
- T2 = T0;
- T0 += T1;
- if (likely(!((T2 ^ T1 ^ (-1)) & (T2 ^ T0) & (1 << 31)))) {
- xer_ov = 0;
- } else {
- xer_so = 1;
- xer_ov = 1;
- }
+ *plow += a;
+ /* carry test */
+ if (*plow < a)
+ (*phigh)++;
+ *phigh += b;
}
-void do_addco (void)
+static void neg128 (uint64_t *plow, uint64_t *phigh)
{
- T2 = T0;
- T0 += T1;
- if (likely(T0 >= T2)) {
- xer_ca = 0;
- } else {
- xer_ca = 1;
- }
- if (likely(!((T2 ^ T1 ^ (-1)) & (T2 ^ T0) & (1 << 31)))) {
- xer_ov = 0;
- } else {
- xer_so = 1;
- xer_ov = 1;
+ *plow = ~ *plow;
+ *phigh = ~ *phigh;
+ add128(plow, phigh, 1, 0);
+}
+
+static void mul64 (uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
+{
+ uint32_t a0, a1, b0, b1;
+ uint64_t v;
+
+ a0 = a;
+ a1 = a >> 32;
+
+ b0 = b;
+ b1 = b >> 32;
+
+ v = (uint64_t)a0 * (uint64_t)b0;
+ *plow = v;
+ *phigh = 0;
+
+ v = (uint64_t)a0 * (uint64_t)b1;
+ add128(plow, phigh, v << 32, v >> 32);
+
+ v = (uint64_t)a1 * (uint64_t)b0;
+ add128(plow, phigh, v << 32, v >> 32);
+
+ v = (uint64_t)a1 * (uint64_t)b1;
+ *phigh += v;
+#if defined(DEBUG_MULDIV)
+ printf("mul: 0x%016llx * 0x%016llx = 0x%016llx%016llx\n",
+ a, b, *phigh, *plow);
+#endif
+}
+
+void do_mul64 (uint64_t *plow, uint64_t *phigh)
+{
+ mul64(plow, phigh, T0, T1);
+}
+
+static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
+{
+ int sa, sb;
+ sa = (a < 0);
+ if (sa)
+ a = -a;
+ sb = (b < 0);
+ if (sb)
+ b = -b;
+ mul64(plow, phigh, a, b);
+ if (sa ^ sb) {
+ neg128(plow, phigh);
}
}
+void do_imul64 (uint64_t *plow, uint64_t *phigh)
+{
+ imul64(plow, phigh, T0, T1);
+}
+#endif
+
void do_adde (void)
{
T2 = T0;
T0 += T1 + xer_ca;
- if (likely(!(T0 < T2 || (xer_ca == 1 && T0 == T2)))) {
+ if (likely(!((uint32_t)T0 < (uint32_t)T2 ||
+ (xer_ca == 1 && (uint32_t)T0 == (uint32_t)T2)))) {
xer_ca = 0;
} else {
xer_ca = 1;
}
}
-void do_addeo (void)
+#if defined(TARGET_PPC64)
+void do_adde_64 (void)
{
T2 = T0;
T0 += T1 + xer_ca;
- if (likely(!(T0 < T2 || (xer_ca == 1 && T0 == T2)))) {
+ if (likely(!((uint64_t)T0 < (uint64_t)T2 ||
+ (xer_ca == 1 && (uint64_t)T0 == (uint64_t)T2)))) {
xer_ca = 0;
} else {
xer_ca = 1;
}
- if (likely(!((T2 ^ T1 ^ (-1)) & (T2 ^ T0) & (1 << 31)))) {
- xer_ov = 0;
- } else {
- xer_so = 1;
- xer_ov = 1;
- }
}
+#endif
void do_addmeo (void)
{
T1 = T0;
T0 += xer_ca + (-1);
- if (likely(!(T1 & (T1 ^ T0) & (1 << 31)))) {
+ if (likely(!((uint32_t)T1 &
+ ((uint32_t)T1 ^ (uint32_t)T0) & (1UL << 31)))) {
xer_ov = 0;
} else {
xer_so = 1;
xer_ca = 1;
}
-void do_addzeo (void)
+#if defined(TARGET_PPC64)
+void do_addmeo_64 (void)
{
T1 = T0;
- T0 += xer_ca;
- if (likely(!((T1 ^ (-1)) & (T1 ^ T0) & (1 << 31)))) {
+ T0 += xer_ca + (-1);
+ if (likely(!((uint64_t)T1 &
+ ((uint64_t)T1 ^ (uint64_t)T0) & (1ULL << 63)))) {
xer_ov = 0;
} else {
xer_so = 1;
xer_ov = 1;
}
- if (likely(T0 >= T1)) {
- xer_ca = 0;
- } else {
+ if (likely(T1 != 0))
xer_ca = 1;
- }
}
+#endif
void do_divwo (void)
{
- if (likely(!((Ts0 == INT32_MIN && Ts1 == -1) || Ts1 == 0))) {
+ if (likely(!(((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) ||
+ (int32_t)T1 == 0))) {
xer_ov = 0;
- T0 = (Ts0 / Ts1);
+ T0 = (int32_t)T0 / (int32_t)T1;
} else {
xer_so = 1;
xer_ov = 1;
}
}
+#if defined(TARGET_PPC64)
+void do_divdo (void)
+{
+ if (likely(!(((int64_t)T0 == INT64_MIN && (int64_t)T1 == -1ULL) ||
+ (int64_t)T1 == 0))) {
+ xer_ov = 0;
+ T0 = (int64_t)T0 / (int64_t)T1;
+ } else {
+ xer_so = 1;
+ xer_ov = 1;
+ T0 = (-1ULL) * ((uint64_t)T0 >> 63);
+ }
+}
+#endif
+
void do_divwuo (void)
{
if (likely((uint32_t)T1 != 0)) {
}
}
+#if defined(TARGET_PPC64)
+void do_divduo (void)
+{
+ if (likely((uint64_t)T1 != 0)) {
+ xer_ov = 0;
+ T0 = (uint64_t)T0 / (uint64_t)T1;
+ } else {
+ xer_so = 1;
+ xer_ov = 1;
+ T0 = 0;
+ }
+}
+#endif
+
void do_mullwo (void)
{
- int64_t res = (int64_t)Ts0 * (int64_t)Ts1;
+ int64_t res = (int64_t)(int32_t)T0 * (int64_t)(int32_t)T1;
if (likely((int32_t)res == res)) {
xer_ov = 0;
T0 = (int32_t)res;
}
-void do_nego (void)
+#if defined(TARGET_PPC64)
+void do_mulldo (void)
{
- if (likely(T0 != INT32_MIN)) {
+ int64_t th;
+ uint64_t tl;
+
+ do_imul64(&tl, &th);
+ if (likely(th == 0)) {
xer_ov = 0;
- T0 = -Ts0;
} else {
xer_ov = 1;
xer_so = 1;
}
+ T0 = (int64_t)tl;
}
+#endif
-void do_subfo (void)
+void do_nego (void)
{
- T2 = T0;
- T0 = T1 - T0;
- if (likely(!(((~T2) ^ T1 ^ (-1)) & ((~T2) ^ T0) & (1 << 31)))) {
+ if (likely((int32_t)T0 != INT32_MIN)) {
xer_ov = 0;
+ T0 = -(int32_t)T0;
} else {
- xer_so = 1;
xer_ov = 1;
+ xer_so = 1;
}
- RETURN();
}
-void do_subfco (void)
+#if defined(TARGET_PPC64)
+void do_nego_64 (void)
{
- T2 = T0;
- T0 = T1 - T0;
- if (likely(T0 > T1)) {
- xer_ca = 0;
- } else {
- xer_ca = 1;
- }
- if (likely(!(((~T2) ^ T1 ^ (-1)) & ((~T2) ^ T0) & (1 << 31)))) {
+ if (likely((int64_t)T0 != INT64_MIN)) {
xer_ov = 0;
+ T0 = -(int64_t)T0;
} else {
- xer_so = 1;
xer_ov = 1;
+ xer_so = 1;
}
}
+#endif
void do_subfe (void)
{
T0 = T1 + ~T0 + xer_ca;
- if (likely(T0 >= T1 && (xer_ca == 0 || T0 != T1))) {
+ if (likely((uint32_t)T0 >= (uint32_t)T1 &&
+ (xer_ca == 0 || (uint32_t)T0 != (uint32_t)T1))) {
xer_ca = 0;
} else {
xer_ca = 1;
}
}
-void do_subfeo (void)
+#if defined(TARGET_PPC64)
+void do_subfe_64 (void)
{
- T2 = T0;
T0 = T1 + ~T0 + xer_ca;
- if (likely(!((~T2 ^ T1 ^ (-1)) & (~T2 ^ T0) & (1 << 31)))) {
+ if (likely((uint64_t)T0 >= (uint64_t)T1 &&
+ (xer_ca == 0 || (uint64_t)T0 != (uint64_t)T1))) {
+ xer_ca = 0;
+ } else {
+ xer_ca = 1;
+ }
+}
+#endif
+
+void do_subfmeo (void)
+{
+ T1 = T0;
+ T0 = ~T0 + xer_ca - 1;
+ if (likely(!((uint32_t)~T1 & ((uint32_t)~T1 ^ (uint32_t)T0) &
+ (1UL << 31)))) {
xer_ov = 0;
} else {
xer_so = 1;
xer_ov = 1;
}
- if (likely(T0 >= T1 && (xer_ca == 0 || T0 != T1))) {
- xer_ca = 0;
- } else {
+ if (likely((uint32_t)T1 != UINT32_MAX))
xer_ca = 1;
- }
}
-void do_subfmeo (void)
+#if defined(TARGET_PPC64)
+void do_subfmeo_64 (void)
{
T1 = T0;
T0 = ~T0 + xer_ca - 1;
- if (likely(!(~T1 & (~T1 ^ T0) & (1 << 31)))) {
+ if (likely(!((uint64_t)~T1 & ((uint64_t)~T1 ^ (uint64_t)T0) &
+ (1ULL << 63)))) {
xer_ov = 0;
} else {
xer_so = 1;
xer_ov = 1;
}
- if (likely(T1 != -1))
+ if (likely((uint64_t)T1 != UINT64_MAX))
xer_ca = 1;
}
+#endif
void do_subfzeo (void)
{
T1 = T0;
T0 = ~T0 + xer_ca;
- if (likely(!((~T1 ^ (-1)) & ((~T1) ^ T0) & (1 << 31)))) {
+ if (likely(!(((uint32_t)~T1 ^ UINT32_MAX) &
+ ((uint32_t)(~T1) ^ (uint32_t)T0) & (1UL << 31)))) {
xer_ov = 0;
} else {
xer_ov = 1;
xer_so = 1;
}
- if (likely(T0 >= ~T1)) {
+ if (likely((uint32_t)T0 >= (uint32_t)~T1)) {
xer_ca = 0;
} else {
xer_ca = 1;
}
}
+#if defined(TARGET_PPC64)
+void do_subfzeo_64 (void)
+{
+ T1 = T0;
+ T0 = ~T0 + xer_ca;
+ if (likely(!(((uint64_t)~T1 ^ UINT64_MAX) &
+ ((uint64_t)(~T1) ^ (uint64_t)T0) & (1ULL << 63)))) {
+ xer_ov = 0;
+ } else {
+ xer_ov = 1;
+ xer_so = 1;
+ }
+ if (likely((uint64_t)T0 >= (uint64_t)~T1)) {
+ xer_ca = 0;
+ } else {
+ xer_ca = 1;
+ }
+}
+#endif
+
/* shift right arithmetic helper */
void do_sraw (void)
{
int32_t ret;
if (likely(!(T1 & 0x20UL))) {
- if (likely(T1 != 0)) {
+ if (likely((uint32_t)T1 != 0)) {
ret = (int32_t)T0 >> (T1 & 0x1fUL);
if (likely(ret >= 0 || ((int32_t)T0 & ((1 << T1) - 1)) == 0)) {
xer_ca = 0;
T0 = ret;
}
+#if defined(TARGET_PPC64)
+void do_srad (void)
+{
+ int64_t ret;
+
+ if (likely(!(T1 & 0x40UL))) {
+ if (likely((uint64_t)T1 != 0)) {
+ ret = (int64_t)T0 >> (T1 & 0x3FUL);
+ if (likely(ret >= 0 || ((int64_t)T0 & ((1ULL << T1) - 1)) == 0)) {
+ xer_ca = 0;
+ } else {
+ xer_ca = 1;
+ }
+ } else {
+ ret = T0;
+ xer_ca = 0;
+ }
+ } else {
+ ret = (-1) * ((uint64_t)T0 >> 63);
+ if (likely(ret >= 0 || ((uint64_t)T0 & ~0x8000000000000000ULL) == 0)) {
+ xer_ca = 0;
+ } else {
+ xer_ca = 1;
+ }
+ }
+ T0 = ret;
+}
+#endif
+
+static inline int popcnt (uint32_t val)
+{
+ int i;
+
+ for (i = 0; val != 0; i++)
+ val = val & (val - 1);
+
+ return i;
+}
+
+void do_popcntb (void)
+{
+ uint32_t ret;
+ int i;
+
+ ret = 0;
+ for (i = 0; i < 32; i += 8)
+ ret |= popcnt((T0 >> i) & 0xFF) << i;
+ T0 = ret;
+}
+
+#if defined(TARGET_PPC64)
+void do_popcntb_64 (void)
+{
+ uint64_t ret;
+ int i;
+
+ ret = 0;
+ for (i = 0; i < 64; i += 8)
+ ret |= (uint64_t)popcnt((T0 >> i) & 0xFF) << i;
+ T0 = ret;
+}
+#endif
+
/*****************************************************************************/
/* Floating point operations helpers */
void do_fctiw (void)
} p;
/* XXX: higher bits are not supposed to be significant.
- * to make tests easier, return the same as a real PowerPC 750 (aka G3)
+ * to make tests easier, return the same as a real PowerPC 750 (aka G3)
*/
p.i = float64_to_int32_round_to_zero(FT0, &env->fp_status);
p.i |= 0xFFF80000ULL << 32;
#if !defined (CONFIG_USER_ONLY)
void do_rfi (void)
{
- env->nip = env->spr[SPR_SRR0] & ~0x00000003;
- T0 = env->spr[SPR_SRR1] & ~0xFFFF0000UL;
+ env->nip = (target_ulong)(env->spr[SPR_SRR0] & ~0x00000003);
+ T0 = (target_ulong)(env->spr[SPR_SRR1] & ~0xFFFF0000UL);
do_store_msr(env, T0);
#if defined (DEBUG_OP)
dump_rfi();
#endif
env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
+
+#if defined(TARGET_PPC64)
+void do_rfi_32 (void)
+{
+ env->nip = (uint32_t)(env->spr[SPR_SRR0] & ~0x00000003);
+ T0 = (uint32_t)(env->spr[SPR_SRR1] & ~0xFFFF0000UL);
+ do_store_msr(env, T0);
+#if defined (DEBUG_OP)
+ dump_rfi();
+#endif
+ env->interrupt_request |= CPU_INTERRUPT_EXITTB;
+}
+#endif
#endif
void do_tw (int flags)
{
- if (!likely(!((Ts0 < Ts1 && (flags & 0x10)) ||
- (Ts0 > Ts1 && (flags & 0x08)) ||
- (Ts0 == Ts1 && (flags & 0x04)) ||
- (T0 < T1 && (flags & 0x02)) ||
- (T0 > T1 && (flags & 0x01)))))
+ if (!likely(!(((int32_t)T0 < (int32_t)T1 && (flags & 0x10)) ||
+ ((int32_t)T0 > (int32_t)T1 && (flags & 0x08)) ||
+ ((int32_t)T0 == (int32_t)T1 && (flags & 0x04)) ||
+ ((uint32_t)T0 < (uint32_t)T1 && (flags & 0x02)) ||
+ ((uint32_t)T0 > (uint32_t)T1 && (flags & 0x01)))))
do_raise_exception_err(EXCP_PROGRAM, EXCP_TRAP);
}
+#if defined(TARGET_PPC64)
+void do_td (int flags)
+{
+ if (!likely(!(((int64_t)T0 < (int64_t)T1 && (flags & 0x10)) ||
+ ((int64_t)T0 > (int64_t)T1 && (flags & 0x08)) ||
+ ((int64_t)T0 == (int64_t)T1 && (flags & 0x04)) ||
+ ((uint64_t)T0 < (uint64_t)T1 && (flags & 0x02)) ||
+ ((uint64_t)T0 > (uint64_t)T1 && (flags & 0x01)))))
+ do_raise_exception_err(EXCP_PROGRAM, EXCP_TRAP);
+}
+#endif
+
/* Instruction cache invalidation helper */
void do_icbi (void)
{
* (not a fetch) by the MMU. To be sure it will be so,
* do the load "by hand".
*/
+ tmp = ldl_kernel((uint32_t)T0);
+ T0 &= ~(ICACHE_LINE_SIZE - 1);
+ tb_invalidate_page_range((uint32_t)T0, (uint32_t)(T0 + ICACHE_LINE_SIZE));
+}
+
#if defined(TARGET_PPC64)
- if (!msr_sf)
- T0 &= 0xFFFFFFFFULL;
-#endif
- tmp = ldl_kernel(T0);
+void do_icbi_64 (void)
+{
+ uint64_t tmp;
+ /* Invalidate one cache line :
+ * PowerPC specification says this is to be treated like a load
+ * (not a fetch) by the MMU. To be sure it will be so,
+ * do the load "by hand".
+ */
+ tmp = ldq_kernel((uint64_t)T0);
T0 &= ~(ICACHE_LINE_SIZE - 1);
- tb_invalidate_page_range(T0, T0 + ICACHE_LINE_SIZE);
+ tb_invalidate_page_range((uint64_t)T0, (uint64_t)(T0 + ICACHE_LINE_SIZE));
}
+#endif
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */
void do_POWER_abso (void)
{
- if (T0 == INT32_MIN) {
+ if ((uint32_t)T0 == INT32_MIN) {
T0 = INT32_MAX;
xer_ov = 1;
xer_so = 1;
{
uint64_t tmp;
- if ((Ts0 == INT32_MIN && Ts1 == -1) || Ts1 == 0) {
+ if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
T0 = (long)((-1) * (T0 >> 31));
env->spr[SPR_MQ] = 0;
} else {
tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
env->spr[SPR_MQ] = tmp % T1;
- T0 = tmp / Ts1;
+ T0 = tmp / (int32_t)T1;
}
}
{
int64_t tmp;
- if ((Ts0 == INT32_MIN && Ts1 == -1) || Ts1 == 0) {
+ if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
T0 = (long)((-1) * (T0 >> 31));
env->spr[SPR_MQ] = 0;
xer_ov = 1;
} else {
tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
env->spr[SPR_MQ] = tmp % T1;
- tmp /= Ts1;
+ tmp /= (int32_t)T1;
if (tmp > (int64_t)INT32_MAX || tmp < (int64_t)INT32_MIN) {
xer_ov = 1;
xer_so = 1;
void do_POWER_divs (void)
{
- if ((Ts0 == INT32_MIN && Ts1 == -1) || Ts1 == 0) {
+ if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
T0 = (long)((-1) * (T0 >> 31));
env->spr[SPR_MQ] = 0;
} else {
env->spr[SPR_MQ] = T0 % T1;
- T0 = Ts0 / Ts1;
+ T0 = (int32_t)T0 / (int32_t)T1;
}
}
void do_POWER_divso (void)
{
- if ((Ts0 == INT32_MIN && Ts1 == -1) || Ts1 == 0) {
+ if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
T0 = (long)((-1) * (T0 >> 31));
env->spr[SPR_MQ] = 0;
xer_ov = 1;
xer_so = 1;
} else {
- T0 = Ts0 / Ts1;
- env->spr[SPR_MQ] = Ts0 % Ts1;
+ env->spr[SPR_MQ] = (int32_t)T0 % (int32_t)T1;
+ T0 = (int32_t)T0 / (int32_t)T1;
xer_ov = 0;
}
}
void do_POWER_dozo (void)
{
- if (Ts1 > Ts0) {
+ if ((int32_t)T1 > (int32_t)T0) {
T2 = T0;
T0 = T1 - T0;
- if (((~T2) ^ T1 ^ (-1)) & ((~T2) ^ T0) & (1 << 31)) {
+ if (((uint32_t)(~T2) ^ (uint32_t)T1 ^ UINT32_MAX) &
+ ((uint32_t)(~T2) ^ (uint32_t)T0) & (1UL << 31)) {
xer_so = 1;
xer_ov = 1;
} else {
{
uint32_t ret;
- if (T0 == T1 + 1) {
+ if ((uint32_t)T0 == (uint32_t)(T1 + 1)) {
ret = -1;
} else {
- ret = (((uint32_t)(-1)) >> (T0)) ^
- (((uint32_t)(-1) >> (T1)) >> 1);
- if (T0 > T1)
+ ret = (((uint32_t)(-1)) >> ((uint32_t)T0)) ^
+ (((uint32_t)(-1) >> ((uint32_t)T1)) >> 1);
+ if ((uint32_t)T0 > (uint32_t)T1)
ret = ~ret;
}
T0 = ret;
/* PowerPC 601 BAT management helper */
void do_store_601_batu (int nr)
{
- do_store_ibatu(env, nr, T0);
+ do_store_ibatu(env, nr, (uint32_t)T0);
env->DBAT[0][nr] = env->IBAT[0][nr];
env->DBAT[1][nr] = env->IBAT[1][nr];
}
void do_op_602_mfrom (void)
{
if (likely(T0 < 602)) {
-#ifdef USE_MFROM_ROM_TABLE
+#if defined(USE_MFROM_ROM_TABLE)
#include "mfrom_table.c"
T0 = mfrom_ROM_table[T0];
#else
/* Embedded PowerPC specific helpers */
void do_405_check_ov (void)
{
- if (likely(((T1 ^ T2) >> 31) || !((T0 ^ T2) >> 31))) {
+ if (likely((((uint32_t)T1 ^ (uint32_t)T2) >> 31) ||
+ !(((uint32_t)T0 ^ (uint32_t)T2) >> 31))) {
xer_ov = 0;
} else {
xer_ov = 1;
void do_405_check_sat (void)
{
- if (!likely(((T1 ^ T2) >> 31) || !((T0 ^ T2) >> 31))) {
+ if (!likely((((uint32_t)T1 ^ (uint32_t)T2) >> 31) ||
+ !(((uint32_t)T0 ^ (uint32_t)T2) >> 31))) {
/* Saturate result */
if (T2 >> 31) {
T0 = INT32_MIN;
void do_tlbie (void)
{
+ T0 = (uint32_t)T0;
#if !defined(FLUSH_ALL_TLBS)
if (unlikely(PPC_MMU(env) == PPC_FLAGS_MMU_SOFT_6xx)) {
ppc6xx_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK, 0);
#endif
}
+#if defined(TARGET_PPC64)
+void do_tlbie_64 (void)
+{
+ T0 = (uint64_t)T0;
+#if !defined(FLUSH_ALL_TLBS)
+ if (unlikely(PPC_MMU(env) == PPC_FLAGS_MMU_SOFT_6xx)) {
+ ppc6xx_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK, 0);
+ if (env->id_tlbs == 1)
+ ppc6xx_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK, 1);
+ } else if (unlikely(PPC_MMU(env) == PPC_FLAGS_MMU_SOFT_4xx)) {
+ /* XXX: TODO */
+#if 0
+ ppcbooke_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK,
+ env->spr[SPR_BOOKE_PID]);
+#endif
+ } else {
+ /* tlbie invalidate TLBs for all segments
+ * As we have 2^36 segments, invalidate all qemu TLBs
+ */
+#if 0
+ T0 &= TARGET_PAGE_MASK;
+ T0 &= ~((target_ulong)-1 << 28);
+ /* XXX: this case should be optimized,
+ * giving a mask to tlb_flush_page
+ */
+ tlb_flush_page(env, T0 | (0x0 << 28));
+ tlb_flush_page(env, T0 | (0x1 << 28));
+ tlb_flush_page(env, T0 | (0x2 << 28));
+ tlb_flush_page(env, T0 | (0x3 << 28));
+ tlb_flush_page(env, T0 | (0x4 << 28));
+ tlb_flush_page(env, T0 | (0x5 << 28));
+ tlb_flush_page(env, T0 | (0x6 << 28));
+ tlb_flush_page(env, T0 | (0x7 << 28));
+ tlb_flush_page(env, T0 | (0x8 << 28));
+ tlb_flush_page(env, T0 | (0x9 << 28));
+ tlb_flush_page(env, T0 | (0xA << 28));
+ tlb_flush_page(env, T0 | (0xB << 28));
+ tlb_flush_page(env, T0 | (0xC << 28));
+ tlb_flush_page(env, T0 | (0xD << 28));
+ tlb_flush_page(env, T0 | (0xE << 28));
+ tlb_flush_page(env, T0 | (0xF << 28));
+#else
+ tlb_flush(env, 1);
+#endif
+ }
+#else
+ do_tlbia();
+#endif
+}
+#endif
+
+#if defined(TARGET_PPC64)
+void do_slbia (void)
+{
+ /* XXX: TODO */
+ tlb_flush(env, 1);
+}
+
+void do_slbie (void)
+{
+ /* XXX: TODO */
+ tlb_flush(env, 1);
+}
+#endif
+
/* Software driven TLBs management */
/* PowerPC 602/603 software TLB load instructions helpers */
void do_load_6xx_tlb (int is_code)
{
target_ulong RPN, CMP, EPN;
int way;
-
+
RPN = env->spr[SPR_RPA];
if (is_code) {
CMP = env->spr[SPR_ICMP];
}
#endif
/* Store this TLB */
- ppc6xx_tlb_store(env, T0 & TARGET_PAGE_MASK, way, is_code, CMP, RPN);
+ ppc6xx_tlb_store(env, (uint32_t)(T0 & TARGET_PAGE_MASK),
+ way, is_code, CMP, RPN);
}
/* Helpers for 4xx TLB management */
void glue(do_POWER2_stfq, MEMSUFFIX) (void);
void glue(do_POWER2_stfq_le, MEMSUFFIX) (void);
+#if defined(TARGET_PPC64)
+void glue(do_lsw_64, MEMSUFFIX) (int dst);
+void glue(do_lsw_le_64, MEMSUFFIX) (int dst);
+void glue(do_stsw_64, MEMSUFFIX) (int src);
+void glue(do_stsw_le_64, MEMSUFFIX) (int src);
+void glue(do_lmw_64, MEMSUFFIX) (int dst);
+void glue(do_lmw_le_64, MEMSUFFIX) (int dst);
+void glue(do_stmw_64, MEMSUFFIX) (int src);
+void glue(do_stmw_le_64, MEMSUFFIX) (int src);
+#endif
+
#else
/* Registers load and stores */
void do_store_fpscr (uint32_t mask);
/* Integer arithmetic helpers */
-void do_addo (void);
-void do_addco (void);
void do_adde (void);
-void do_addeo (void);
void do_addmeo (void);
-void do_addzeo (void);
void do_divwo (void);
void do_divwuo (void);
void do_mullwo (void);
void do_nego (void);
-void do_subfo (void);
-void do_subfco (void);
void do_subfe (void);
-void do_subfeo (void);
void do_subfmeo (void);
void do_subfzeo (void);
-void do_sraw(void);
+void do_sraw (void);
+#if defined(TARGET_PPC64)
+void do_adde_64 (void);
+void do_addmeo_64 (void);
+void do_imul64 (uint64_t *tl, uint64_t *th);
+void do_mul64 (uint64_t *tl, uint64_t *th);
+void do_divdo (void);
+void do_divduo (void);
+void do_mulldo (void);
+void do_nego_64 (void);
+void do_subfe_64 (void);
+void do_subfmeo_64 (void);
+void do_subfzeo_64 (void);
+void do_srad (void);
+#endif
+void do_popcntb (void);
+#if defined(TARGET_PPC64)
+void do_popcntb_64 (void);
+#endif
/* Floating-point arithmetic helpers */
void do_fsqrt (void);
void do_fcmpo (void);
void do_tw (int flags);
+#if defined(TARGET_PPC64)
+void do_td (int flags);
+#endif
void do_icbi (void);
+#if defined(TARGET_PPC64)
+void do_icbi_64 (void);
+#endif
#if !defined(CONFIG_USER_ONLY)
void do_rfi (void);
+#if defined(TARGET_PPC64)
+void do_rfi_32 (void);
+#endif
void do_tlbia (void);
void do_tlbie (void);
+#if defined(TARGET_PPC64)
+void do_tlbie_64 (void);
+#endif
void do_load_6xx_tlb (int is_code);
+#if defined(TARGET_PPC64)
+void do_slbia (void);
+void do_slbie (void);
+#endif
#endif
/* POWER / PowerPC 601 specific helpers */
/*
* PowerPC emulation micro-operations helpers for qemu.
- *
+ *
* Copyright (c) 2003-2007 Jocelyn Mayer
*
* This library is free software; you can redistribute it and/or
void glue(do_lmw, MEMSUFFIX) (int dst)
{
for (; dst < 32; dst++, T0 += 4) {
- ugpr(dst) = glue(ldl, MEMSUFFIX)(T0);
+ ugpr(dst) = glue(ldl, MEMSUFFIX)((uint32_t)T0);
+ }
+}
+
+#if defined(TARGET_PPC64)
+void glue(do_lmw_64, MEMSUFFIX) (int dst)
+{
+ for (; dst < 32; dst++, T0 += 4) {
+ ugpr(dst) = glue(ldl, MEMSUFFIX)((uint64_t)T0);
}
}
+#endif
void glue(do_stmw, MEMSUFFIX) (int src)
{
for (; src < 32; src++, T0 += 4) {
- glue(stl, MEMSUFFIX)(T0, ugpr(src));
+ glue(stl, MEMSUFFIX)((uint32_t)T0, ugpr(src));
+ }
+}
+
+#if defined(TARGET_PPC64)
+void glue(do_stmw_64, MEMSUFFIX) (int src)
+{
+ for (; src < 32; src++, T0 += 4) {
+ glue(stl, MEMSUFFIX)((uint64_t)T0, ugpr(src));
}
}
+#endif
void glue(do_lmw_le, MEMSUFFIX) (int dst)
{
for (; dst < 32; dst++, T0 += 4) {
- ugpr(dst) = glue(ld32r, MEMSUFFIX)(T0);
+ ugpr(dst) = glue(ld32r, MEMSUFFIX)((uint32_t)T0);
+ }
+}
+
+#if defined(TARGET_PPC64)
+void glue(do_lmw_le_64, MEMSUFFIX) (int dst)
+{
+ for (; dst < 32; dst++, T0 += 4) {
+ ugpr(dst) = glue(ld32r, MEMSUFFIX)((uint64_t)T0);
}
}
+#endif
void glue(do_stmw_le, MEMSUFFIX) (int src)
{
for (; src < 32; src++, T0 += 4) {
- glue(st32r, MEMSUFFIX)(T0, ugpr(src));
+ glue(st32r, MEMSUFFIX)((uint32_t)T0, ugpr(src));
}
}
+#if defined(TARGET_PPC64)
+void glue(do_stmw_le_64, MEMSUFFIX) (int src)
+{
+ for (; src < 32; src++, T0 += 4) {
+ glue(st32r, MEMSUFFIX)((uint64_t)T0, ugpr(src));
+ }
+}
+#endif
+
void glue(do_lsw, MEMSUFFIX) (int dst)
{
uint32_t tmp;
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
- ugpr(dst++) = glue(ldl, MEMSUFFIX)(T0);
+ ugpr(dst++) = glue(ldl, MEMSUFFIX)((uint32_t)T0);
if (unlikely(dst == 32))
dst = 0;
}
if (unlikely(T1 != 0)) {
tmp = 0;
for (sh = 24; T1 > 0; T1--, T0++, sh -= 8) {
- tmp |= glue(ldub, MEMSUFFIX)(T0) << sh;
+ tmp |= glue(ldub, MEMSUFFIX)((uint32_t)T0) << sh;
}
ugpr(dst) = tmp;
}
}
+#if defined(TARGET_PPC64)
+void glue(do_lsw_64, MEMSUFFIX) (int dst)
+{
+ uint32_t tmp;
+ int sh;
+
+ for (; T1 > 3; T1 -= 4, T0 += 4) {
+ ugpr(dst++) = glue(ldl, MEMSUFFIX)((uint64_t)T0);
+ if (unlikely(dst == 32))
+ dst = 0;
+ }
+ if (unlikely(T1 != 0)) {
+ tmp = 0;
+ for (sh = 24; T1 > 0; T1--, T0++, sh -= 8) {
+ tmp |= glue(ldub, MEMSUFFIX)((uint64_t)T0) << sh;
+ }
+ ugpr(dst) = tmp;
+ }
+}
+#endif
+
void glue(do_stsw, MEMSUFFIX) (int src)
{
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
- glue(stl, MEMSUFFIX)(T0, ugpr(src++));
+ glue(stl, MEMSUFFIX)((uint32_t)T0, ugpr(src++));
if (unlikely(src == 32))
src = 0;
}
if (unlikely(T1 != 0)) {
for (sh = 24; T1 > 0; T1--, T0++, sh -= 8)
- glue(stb, MEMSUFFIX)(T0, (ugpr(src) >> sh) & 0xFF);
+ glue(stb, MEMSUFFIX)((uint32_t)T0, (ugpr(src) >> sh) & 0xFF);
}
}
+#if defined(TARGET_PPC64)
+void glue(do_stsw_64, MEMSUFFIX) (int src)
+{
+ int sh;
+
+ for (; T1 > 3; T1 -= 4, T0 += 4) {
+ glue(stl, MEMSUFFIX)((uint64_t)T0, ugpr(src++));
+ if (unlikely(src == 32))
+ src = 0;
+ }
+ if (unlikely(T1 != 0)) {
+ for (sh = 24; T1 > 0; T1--, T0++, sh -= 8)
+ glue(stb, MEMSUFFIX)((uint64_t)T0, (ugpr(src) >> sh) & 0xFF);
+ }
+}
+#endif
+
void glue(do_lsw_le, MEMSUFFIX) (int dst)
{
uint32_t tmp;
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
- ugpr(dst++) = glue(ld32r, MEMSUFFIX)(T0);
+ ugpr(dst++) = glue(ld32r, MEMSUFFIX)((uint32_t)T0);
+ if (unlikely(dst == 32))
+ dst = 0;
+ }
+ if (unlikely(T1 != 0)) {
+ tmp = 0;
+ for (sh = 0; T1 > 0; T1--, T0++, sh += 8) {
+ tmp |= glue(ldub, MEMSUFFIX)((uint32_t)T0) << sh;
+ }
+ ugpr(dst) = tmp;
+ }
+}
+
+#if defined(TARGET_PPC64)
+void glue(do_lsw_le_64, MEMSUFFIX) (int dst)
+{
+ uint32_t tmp;
+ int sh;
+
+ for (; T1 > 3; T1 -= 4, T0 += 4) {
+ ugpr(dst++) = glue(ld32r, MEMSUFFIX)((uint64_t)T0);
if (unlikely(dst == 32))
dst = 0;
}
if (unlikely(T1 != 0)) {
tmp = 0;
for (sh = 0; T1 > 0; T1--, T0++, sh += 8) {
- tmp |= glue(ldub, MEMSUFFIX)(T0) << sh;
+ tmp |= glue(ldub, MEMSUFFIX)((uint64_t)T0) << sh;
}
ugpr(dst) = tmp;
}
}
+#endif
void glue(do_stsw_le, MEMSUFFIX) (int src)
{
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
- glue(st32r, MEMSUFFIX)(T0, ugpr(src++));
+ glue(st32r, MEMSUFFIX)((uint32_t)T0, ugpr(src++));
+ if (unlikely(src == 32))
+ src = 0;
+ }
+ if (unlikely(T1 != 0)) {
+ for (sh = 0; T1 > 0; T1--, T0++, sh += 8)
+ glue(stb, MEMSUFFIX)((uint32_t)T0, (ugpr(src) >> sh) & 0xFF);
+ }
+}
+
+#if defined(TARGET_PPC64)
+void glue(do_stsw_le_64, MEMSUFFIX) (int src)
+{
+ int sh;
+
+ for (; T1 > 3; T1 -= 4, T0 += 4) {
+ glue(st32r, MEMSUFFIX)((uint64_t)T0, ugpr(src++));
if (unlikely(src == 32))
src = 0;
}
if (unlikely(T1 != 0)) {
for (sh = 0; T1 > 0; T1--, T0++, sh += 8)
- glue(stb, MEMSUFFIX)(T0, (ugpr(src) >> sh) & 0xFF);
+ glue(stb, MEMSUFFIX)((uint64_t)T0, (ugpr(src) >> sh) & 0xFF);
}
}
+#endif
/* PPC 601 specific instructions (POWER bridge) */
// XXX: to be tested
d = 24;
reg = dest;
for (i = 0; i < T1; i++) {
- c = glue(ldub, MEMSUFFIX)(T0++);
+ c = glue(ldub, MEMSUFFIX)((uint32_t)T0++);
/* ra (if not 0) and rb are never modified */
if (likely(reg != rb && (ra == 0 || reg != ra))) {
ugpr(reg) = (ugpr(reg) & ~(0xFF << d)) | (c << d);
/* XXX: TAGs are not managed */
void glue(do_POWER2_lfq, MEMSUFFIX) (void)
{
- FT0 = glue(ldfq, MEMSUFFIX)(T0);
- FT1 = glue(ldfq, MEMSUFFIX)(T0 + 4);
+ FT0 = glue(ldfq, MEMSUFFIX)((uint32_t)T0);
+ FT1 = glue(ldfq, MEMSUFFIX)((uint32_t)(T0 + 4));
}
static inline double glue(ldfqr, MEMSUFFIX) (target_ulong EA)
void glue(do_POWER2_lfq_le, MEMSUFFIX) (void)
{
- FT0 = glue(ldfqr, MEMSUFFIX)(T0 + 4);
- FT1 = glue(ldfqr, MEMSUFFIX)(T0);
+ FT0 = glue(ldfqr, MEMSUFFIX)((uint32_t)(T0 + 4));
+ FT1 = glue(ldfqr, MEMSUFFIX)((uint32_t)T0);
}
void glue(do_POWER2_stfq, MEMSUFFIX) (void)
{
- glue(stfq, MEMSUFFIX)(T0, FT0);
- glue(stfq, MEMSUFFIX)(T0 + 4, FT1);
+ glue(stfq, MEMSUFFIX)((uint32_t)T0, FT0);
+ glue(stfq, MEMSUFFIX)((uint32_t)(T0 + 4), FT1);
}
static inline void glue(stfqr, MEMSUFFIX) (target_ulong EA, double d)
void glue(do_POWER2_stfq_le, MEMSUFFIX) (void)
{
- glue(stfqr, MEMSUFFIX)(T0 + 4, FT0);
- glue(stfqr, MEMSUFFIX)(T0, FT1);
+ glue(stfqr, MEMSUFFIX)((uint32_t)(T0 + 4), FT0);
+ glue(stfqr, MEMSUFFIX)((uint32_t)T0, FT1);
}
#undef MEMSUFFIX
((tmp & 0x0000FF00) << 8) | ((tmp & 0x000000FF) << 24);
}
+#if defined(TARGET_PPC64)
+static inline int64_t glue(ldsl, MEMSUFFIX) (target_ulong EA)
+{
+ return (int32_t)glue(ldl, MEMSUFFIX)(EA);
+}
+
+static inline uint64_t glue(ld64r, MEMSUFFIX) (target_ulong EA)
+{
+ uint64_t tmp = glue(ldq, MEMSUFFIX)(EA);
+ return ((tmp & 0xFF00000000000000ULL) >> 56) |
+ ((tmp & 0x00FF000000000000ULL) >> 40) |
+ ((tmp & 0x0000FF0000000000ULL) >> 24) |
+ ((tmp & 0x000000FF00000000ULL) >> 8) |
+ ((tmp & 0x00000000FF000000ULL) << 8) |
+ ((tmp & 0x0000000000FF0000ULL) << 24) |
+ ((tmp & 0x000000000000FF00ULL) << 40) |
+ ((tmp & 0x00000000000000FFULL) << 56);
+}
+
+static inline int64_t glue(ld32rs, MEMSUFFIX) (target_ulong EA)
+{
+ uint32_t tmp = glue(ldl, MEMSUFFIX)(EA);
+ return (int32_t)((tmp & 0xFF000000) >> 24) | ((tmp & 0x00FF0000) >> 8) |
+ ((tmp & 0x0000FF00) << 8) | ((tmp & 0x000000FF) << 24);
+}
+#endif
+
static inline void glue(st16r, MEMSUFFIX) (target_ulong EA, uint16_t data)
{
uint16_t tmp = ((data & 0xFF00) >> 8) | ((data & 0x00FF) << 8);
glue(stl, MEMSUFFIX)(EA, tmp);
}
+#if defined(TARGET_PPC64)
+static inline void glue(st64r, MEMSUFFIX) (target_ulong EA, uint64_t data)
+{
+ uint64_t tmp = ((data & 0xFF00000000000000ULL) >> 56) |
+ ((data & 0x00FF000000000000ULL) >> 40) |
+ ((data & 0x0000FF0000000000ULL) >> 24) |
+ ((data & 0x000000FF00000000ULL) >> 8) |
+ ((data & 0x00000000FF000000ULL) << 8) |
+ ((data & 0x0000000000FF0000ULL) << 24) |
+ ((data & 0x000000000000FF00ULL) << 40) |
+ ((data & 0x00000000000000FFULL) << 56);
+ glue(stq, MEMSUFFIX)(EA, tmp);
+}
+#endif
+
/*** Integer load ***/
#define PPC_LD_OP(name, op) \
-PPC_OP(glue(glue(l, name), MEMSUFFIX)) \
+void OPPROTO glue(glue(op_l, name), MEMSUFFIX) (void) \
{ \
- T1 = glue(op, MEMSUFFIX)(T0); \
+ T1 = glue(op, MEMSUFFIX)((uint32_t)T0); \
RETURN(); \
}
+#if defined(TARGET_PPC64)
+#define PPC_LD_OP_64(name, op) \
+void OPPROTO glue(glue(glue(op_l, name), _64), MEMSUFFIX) (void) \
+{ \
+ T1 = glue(op, MEMSUFFIX)((uint64_t)T0); \
+ RETURN(); \
+}
+#endif
+
#define PPC_ST_OP(name, op) \
-PPC_OP(glue(glue(st, name), MEMSUFFIX)) \
+void OPPROTO glue(glue(op_st, name), MEMSUFFIX) (void) \
{ \
- glue(op, MEMSUFFIX)(T0, T1); \
+ glue(op, MEMSUFFIX)((uint32_t)T0, T1); \
RETURN(); \
}
+#if defined(TARGET_PPC64)
+#define PPC_ST_OP_64(name, op) \
+void OPPROTO glue(glue(glue(op_st, name), _64), MEMSUFFIX) (void) \
+{ \
+ glue(op, MEMSUFFIX)((uint64_t)T0, T1); \
+ RETURN(); \
+}
+#endif
+
PPC_LD_OP(bz, ldub);
PPC_LD_OP(ha, ldsw);
PPC_LD_OP(hz, lduw);
PPC_LD_OP(wz, ldl);
+#if defined(TARGET_PPC64)
+PPC_LD_OP(d, ldq);
+PPC_LD_OP(wa, ldsl);
+PPC_LD_OP_64(d, ldq);
+PPC_LD_OP_64(wa, ldsl);
+PPC_LD_OP_64(bz, ldub);
+PPC_LD_OP_64(ha, ldsw);
+PPC_LD_OP_64(hz, lduw);
+PPC_LD_OP_64(wz, ldl);
+#endif
PPC_LD_OP(ha_le, ld16rs);
PPC_LD_OP(hz_le, ld16r);
PPC_LD_OP(wz_le, ld32r);
+#if defined(TARGET_PPC64)
+PPC_LD_OP(d_le, ld64r);
+PPC_LD_OP(wa_le, ld32rs);
+PPC_LD_OP_64(d_le, ld64r);
+PPC_LD_OP_64(wa_le, ld32rs);
+PPC_LD_OP_64(ha_le, ld16rs);
+PPC_LD_OP_64(hz_le, ld16r);
+PPC_LD_OP_64(wz_le, ld32r);
+#endif
/*** Integer store ***/
PPC_ST_OP(b, stb);
PPC_ST_OP(h, stw);
PPC_ST_OP(w, stl);
+#if defined(TARGET_PPC64)
+PPC_ST_OP(d, stq);
+PPC_ST_OP_64(d, stq);
+PPC_ST_OP_64(b, stb);
+PPC_ST_OP_64(h, stw);
+PPC_ST_OP_64(w, stl);
+#endif
PPC_ST_OP(h_le, st16r);
PPC_ST_OP(w_le, st32r);
+#if defined(TARGET_PPC64)
+PPC_ST_OP(d_le, st64r);
+PPC_ST_OP_64(d_le, st64r);
+PPC_ST_OP_64(h_le, st16r);
+PPC_ST_OP_64(w_le, st32r);
+#endif
/*** Integer load and store with byte reverse ***/
PPC_LD_OP(hbr, ld16r);
PPC_LD_OP(wbr, ld32r);
PPC_ST_OP(hbr, st16r);
PPC_ST_OP(wbr, st32r);
+#if defined(TARGET_PPC64)
+PPC_LD_OP_64(hbr, ld16r);
+PPC_LD_OP_64(wbr, ld32r);
+PPC_ST_OP_64(hbr, st16r);
+PPC_ST_OP_64(wbr, st32r);
+#endif
PPC_LD_OP(hbr_le, lduw);
PPC_LD_OP(wbr_le, ldl);
PPC_ST_OP(hbr_le, stw);
PPC_ST_OP(wbr_le, stl);
+#if defined(TARGET_PPC64)
+PPC_LD_OP_64(hbr_le, lduw);
+PPC_LD_OP_64(wbr_le, ldl);
+PPC_ST_OP_64(hbr_le, stw);
+PPC_ST_OP_64(wbr_le, stl);
+#endif
/*** Integer load and store multiple ***/
-PPC_OP(glue(lmw, MEMSUFFIX))
+void OPPROTO glue(op_lmw, MEMSUFFIX) (void)
{
glue(do_lmw, MEMSUFFIX)(PARAM1);
RETURN();
}
-PPC_OP(glue(lmw_le, MEMSUFFIX))
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_lmw_64, MEMSUFFIX) (void)
+{
+ glue(do_lmw_64, MEMSUFFIX)(PARAM1);
+ RETURN();
+}
+#endif
+
+void OPPROTO glue(op_lmw_le, MEMSUFFIX) (void)
{
glue(do_lmw_le, MEMSUFFIX)(PARAM1);
RETURN();
}
-PPC_OP(glue(stmw, MEMSUFFIX))
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_lmw_le_64, MEMSUFFIX) (void)
+{
+ glue(do_lmw_le_64, MEMSUFFIX)(PARAM1);
+ RETURN();
+}
+#endif
+
+void OPPROTO glue(op_stmw, MEMSUFFIX) (void)
{
glue(do_stmw, MEMSUFFIX)(PARAM1);
RETURN();
}
-PPC_OP(glue(stmw_le, MEMSUFFIX))
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_stmw_64, MEMSUFFIX) (void)
+{
+ glue(do_stmw_64, MEMSUFFIX)(PARAM1);
+ RETURN();
+}
+#endif
+
+void OPPROTO glue(op_stmw_le, MEMSUFFIX) (void)
{
glue(do_stmw_le, MEMSUFFIX)(PARAM1);
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_stmw_le_64, MEMSUFFIX) (void)
+{
+ glue(do_stmw_le_64, MEMSUFFIX)(PARAM1);
+ RETURN();
+}
+#endif
+
/*** Integer load and store strings ***/
-PPC_OP(glue(lswi, MEMSUFFIX))
+void OPPROTO glue(op_lswi, MEMSUFFIX) (void)
+{
+ glue(do_lsw, MEMSUFFIX)(PARAM1);
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_lswi_64, MEMSUFFIX) (void)
{
- glue(do_lsw, MEMSUFFIX)(PARAM(1));
+ glue(do_lsw_64, MEMSUFFIX)(PARAM1);
RETURN();
}
+#endif
-PPC_OP(glue(lswi_le, MEMSUFFIX))
+void OPPROTO glue(op_lswi_le, MEMSUFFIX) (void)
{
- glue(do_lsw_le, MEMSUFFIX)(PARAM(1));
+ glue(do_lsw_le, MEMSUFFIX)(PARAM1);
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_lswi_le_64, MEMSUFFIX) (void)
+{
+ glue(do_lsw_le_64, MEMSUFFIX)(PARAM1);
+ RETURN();
+}
+#endif
+
/* PPC32 specification says we must generate an exception if
* rA is in the range of registers to be loaded.
* In an other hand, IBM says this is valid, but rA won't be loaded.
* For now, I'll follow the spec...
*/
-PPC_OP(glue(lswx, MEMSUFFIX))
+void OPPROTO glue(op_lswx, MEMSUFFIX) (void)
+{
+ /* Note: T1 comes from xer_bc then no cast is needed */
+ if (likely(T1 != 0)) {
+ if (unlikely((PARAM1 < PARAM2 && (PARAM1 + T1) > PARAM2) ||
+ (PARAM1 < PARAM3 && (PARAM1 + T1) > PARAM3))) {
+ do_raise_exception_err(EXCP_PROGRAM, EXCP_INVAL | EXCP_INVAL_LSWX);
+ } else {
+ glue(do_lsw, MEMSUFFIX)(PARAM1);
+ }
+ }
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_lswx_64, MEMSUFFIX) (void)
+{
+ /* Note: T1 comes from xer_bc then no cast is needed */
+ if (likely(T1 != 0)) {
+ if (unlikely((PARAM1 < PARAM2 && (PARAM1 + T1) > PARAM2) ||
+ (PARAM1 < PARAM3 && (PARAM1 + T1) > PARAM3))) {
+ do_raise_exception_err(EXCP_PROGRAM, EXCP_INVAL | EXCP_INVAL_LSWX);
+ } else {
+ glue(do_lsw_64, MEMSUFFIX)(PARAM1);
+ }
+ }
+ RETURN();
+}
+#endif
+
+void OPPROTO glue(op_lswx_le, MEMSUFFIX) (void)
{
- if (unlikely(T1 > 0)) {
+ /* Note: T1 comes from xer_bc then no cast is needed */
+ if (likely(T1 != 0)) {
if (unlikely((PARAM1 < PARAM2 && (PARAM1 + T1) > PARAM2) ||
(PARAM1 < PARAM3 && (PARAM1 + T1) > PARAM3))) {
do_raise_exception_err(EXCP_PROGRAM, EXCP_INVAL | EXCP_INVAL_LSWX);
} else {
- glue(do_lsw, MEMSUFFIX)(PARAM(1));
+ glue(do_lsw_le, MEMSUFFIX)(PARAM1);
}
}
RETURN();
}
-PPC_OP(glue(lswx_le, MEMSUFFIX))
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_lswx_le_64, MEMSUFFIX) (void)
{
- if (unlikely(T1 > 0)) {
+ /* Note: T1 comes from xer_bc then no cast is needed */
+ if (likely(T1 != 0)) {
if (unlikely((PARAM1 < PARAM2 && (PARAM1 + T1) > PARAM2) ||
(PARAM1 < PARAM3 && (PARAM1 + T1) > PARAM3))) {
do_raise_exception_err(EXCP_PROGRAM, EXCP_INVAL | EXCP_INVAL_LSWX);
} else {
- glue(do_lsw_le, MEMSUFFIX)(PARAM(1));
+ glue(do_lsw_le_64, MEMSUFFIX)(PARAM1);
}
}
RETURN();
}
+#endif
+
+void OPPROTO glue(op_stsw, MEMSUFFIX) (void)
+{
+ glue(do_stsw, MEMSUFFIX)(PARAM1);
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_stsw_64, MEMSUFFIX) (void)
+{
+ glue(do_stsw_64, MEMSUFFIX)(PARAM1);
+ RETURN();
+}
+#endif
-PPC_OP(glue(stsw, MEMSUFFIX))
+void OPPROTO glue(op_stsw_le, MEMSUFFIX) (void)
{
- glue(do_stsw, MEMSUFFIX)(PARAM(1));
+ glue(do_stsw_le, MEMSUFFIX)(PARAM1);
RETURN();
}
-PPC_OP(glue(stsw_le, MEMSUFFIX))
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_stsw_le_64, MEMSUFFIX) (void)
{
- glue(do_stsw_le, MEMSUFFIX)(PARAM(1));
+ glue(do_stsw_le_64, MEMSUFFIX)(PARAM1);
RETURN();
}
+#endif
/*** Floating-point store ***/
#define PPC_STF_OP(name, op) \
-PPC_OP(glue(glue(st, name), MEMSUFFIX)) \
+void OPPROTO glue(glue(op_st, name), MEMSUFFIX) (void) \
+{ \
+ glue(op, MEMSUFFIX)((uint32_t)T0, FT0); \
+ RETURN(); \
+}
+
+#if defined(TARGET_PPC64)
+#define PPC_STF_OP_64(name, op) \
+void OPPROTO glue(glue(glue(op_st, name), _64), MEMSUFFIX) (void) \
{ \
- glue(op, MEMSUFFIX)(T0, FT0); \
+ glue(op, MEMSUFFIX)((uint64_t)T0, FT0); \
RETURN(); \
}
+#endif
PPC_STF_OP(fd, stfq);
PPC_STF_OP(fs, stfl);
+#if defined(TARGET_PPC64)
+PPC_STF_OP_64(fd, stfq);
+PPC_STF_OP_64(fs, stfl);
+#endif
static inline void glue(stfqr, MEMSUFFIX) (target_ulong EA, double d)
{
PPC_STF_OP(fd_le, stfqr);
PPC_STF_OP(fs_le, stflr);
+#if defined(TARGET_PPC64)
+PPC_STF_OP_64(fd_le, stfqr);
+PPC_STF_OP_64(fs_le, stflr);
+#endif
/*** Floating-point load ***/
#define PPC_LDF_OP(name, op) \
-PPC_OP(glue(glue(l, name), MEMSUFFIX)) \
+void OPPROTO glue(glue(op_l, name), MEMSUFFIX) (void) \
+{ \
+ FT0 = glue(op, MEMSUFFIX)((uint32_t)T0); \
+ RETURN(); \
+}
+
+#if defined(TARGET_PPC64)
+#define PPC_LDF_OP_64(name, op) \
+void OPPROTO glue(glue(glue(op_l, name), _64), MEMSUFFIX) (void) \
{ \
- FT0 = glue(op, MEMSUFFIX)(T0); \
+ FT0 = glue(op, MEMSUFFIX)((uint64_t)T0); \
RETURN(); \
}
+#endif
PPC_LDF_OP(fd, ldfq);
PPC_LDF_OP(fs, ldfl);
+#if defined(TARGET_PPC64)
+PPC_LDF_OP_64(fd, ldfq);
+PPC_LDF_OP_64(fs, ldfl);
+#endif
static inline double glue(ldfqr, MEMSUFFIX) (target_ulong EA)
{
PPC_LDF_OP(fd_le, ldfqr);
PPC_LDF_OP(fs_le, ldflr);
+#if defined(TARGET_PPC64)
+PPC_LDF_OP_64(fd_le, ldfqr);
+PPC_LDF_OP_64(fs_le, ldflr);
+#endif
/* Load and set reservation */
-PPC_OP(glue(lwarx, MEMSUFFIX))
+void OPPROTO glue(op_lwarx, MEMSUFFIX) (void)
+{
+ if (unlikely(T0 & 0x03)) {
+ do_raise_exception(EXCP_ALIGN);
+ } else {
+ T1 = glue(ldl, MEMSUFFIX)((uint32_t)T0);
+ regs->reserve = (uint32_t)T0;
+ }
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_lwarx_64, MEMSUFFIX) (void)
+{
+ if (unlikely(T0 & 0x03)) {
+ do_raise_exception(EXCP_ALIGN);
+ } else {
+ T1 = glue(ldl, MEMSUFFIX)((uint64_t)T0);
+ regs->reserve = (uint64_t)T0;
+ }
+ RETURN();
+}
+
+void OPPROTO glue(op_ldarx_64, MEMSUFFIX) (void)
+{
+ if (unlikely(T0 & 0x03)) {
+ do_raise_exception(EXCP_ALIGN);
+ } else {
+ T1 = glue(ldq, MEMSUFFIX)((uint64_t)T0);
+ regs->reserve = (uint64_t)T0;
+ }
+ RETURN();
+}
+#endif
+
+void OPPROTO glue(op_lwarx_le, MEMSUFFIX) (void)
+{
+ if (unlikely(T0 & 0x03)) {
+ do_raise_exception(EXCP_ALIGN);
+ } else {
+ T1 = glue(ld32r, MEMSUFFIX)((uint32_t)T0);
+ regs->reserve = (uint32_t)T0;
+ }
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_lwarx_le_64, MEMSUFFIX) (void)
{
if (unlikely(T0 & 0x03)) {
do_raise_exception(EXCP_ALIGN);
} else {
- T1 = glue(ldl, MEMSUFFIX)(T0);
- regs->reserve = T0;
+ T1 = glue(ld32r, MEMSUFFIX)((uint64_t)T0);
+ regs->reserve = (uint64_t)T0;
}
RETURN();
}
-PPC_OP(glue(lwarx_le, MEMSUFFIX))
+void OPPROTO glue(op_ldarx_le_64, MEMSUFFIX) (void)
{
if (unlikely(T0 & 0x03)) {
do_raise_exception(EXCP_ALIGN);
} else {
- T1 = glue(ld32r, MEMSUFFIX)(T0);
- regs->reserve = T0;
+ T1 = glue(ld64r, MEMSUFFIX)((uint64_t)T0);
+ regs->reserve = (uint64_t)T0;
}
RETURN();
}
+#endif
/* Store with reservation */
-PPC_OP(glue(stwcx, MEMSUFFIX))
+void OPPROTO glue(op_stwcx, MEMSUFFIX) (void)
+{
+ if (unlikely(T0 & 0x03)) {
+ do_raise_exception(EXCP_ALIGN);
+ } else {
+ if (unlikely(regs->reserve != (uint32_t)T0)) {
+ env->crf[0] = xer_ov;
+ } else {
+ glue(stl, MEMSUFFIX)((uint32_t)T0, T1);
+ env->crf[0] = xer_ov | 0x02;
+ }
+ }
+ regs->reserve = -1;
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_stwcx_64, MEMSUFFIX) (void)
+{
+ if (unlikely(T0 & 0x03)) {
+ do_raise_exception(EXCP_ALIGN);
+ } else {
+ if (unlikely(regs->reserve != (uint64_t)T0)) {
+ env->crf[0] = xer_ov;
+ } else {
+ glue(stl, MEMSUFFIX)((uint64_t)T0, T1);
+ env->crf[0] = xer_ov | 0x02;
+ }
+ }
+ regs->reserve = -1;
+ RETURN();
+}
+
+void OPPROTO glue(op_stdcx_64, MEMSUFFIX) (void)
{
if (unlikely(T0 & 0x03)) {
do_raise_exception(EXCP_ALIGN);
} else {
- if (unlikely(regs->reserve != T0)) {
+ if (unlikely(regs->reserve != (uint64_t)T0)) {
env->crf[0] = xer_ov;
} else {
- glue(stl, MEMSUFFIX)(T0, T1);
+ glue(stq, MEMSUFFIX)((uint64_t)T0, T1);
+ env->crf[0] = xer_ov | 0x02;
+ }
+ }
+ regs->reserve = -1;
+ RETURN();
+}
+#endif
+
+void OPPROTO glue(op_stwcx_le, MEMSUFFIX) (void)
+{
+    if (unlikely(T0 & 0x03)) {
+        do_raise_exception(EXCP_ALIGN);
+    } else {
+        if (unlikely(regs->reserve != (uint32_t)T0)) {
+            env->crf[0] = xer_ov;
+        } else {
+            glue(st32r, MEMSUFFIX)((uint32_t)T0, T1);
            env->crf[0] = xer_ov | 0x02;
        }
    }
+    /* stwcx. always clears the reservation, whether it succeeds or not,
+     * like the other stwcx/stdcx variants in this file.
+     */
+    regs->reserve = -1;
    RETURN();
}
-PPC_OP(glue(stwcx_le, MEMSUFFIX))
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_stwcx_le_64, MEMSUFFIX) (void)
{
    if (unlikely(T0 & 0x03)) {
        do_raise_exception(EXCP_ALIGN);
    } else {
-        if (unlikely(regs->reserve != T0)) {
+        if (unlikely(regs->reserve != (uint64_t)T0)) {
            env->crf[0] = xer_ov;
        } else {
-            glue(st32r, MEMSUFFIX)(T0, T1);
+            glue(st32r, MEMSUFFIX)((uint64_t)T0, T1);
            env->crf[0] = xer_ov | 0x02;
        }
    }
+    /* stwcx. always clears the reservation, whether it succeeds or not */
+    regs->reserve = -1;
    RETURN();
}
-PPC_OP(glue(dcbz, MEMSUFFIX))
+void OPPROTO glue(op_stdcx_le_64, MEMSUFFIX) (void)
+{
+ if (unlikely(T0 & 0x03)) {
+ do_raise_exception(EXCP_ALIGN);
+ } else {
+ if (unlikely(regs->reserve != (uint64_t)T0)) {
+ env->crf[0] = xer_ov;
+ } else {
+ glue(st64r, MEMSUFFIX)((uint64_t)T0, T1);
+ env->crf[0] = xer_ov | 0x02;
+ }
+ }
+ regs->reserve = -1;
+ RETURN();
+}
+#endif
+
+void OPPROTO glue(op_dcbz, MEMSUFFIX) (void)
+{
+ glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x00), 0);
+ glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x04), 0);
+ glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x08), 0);
+ glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x0C), 0);
+ glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x10), 0);
+ glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x14), 0);
+ glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x18), 0);
+ glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x1C), 0);
+#if DCACHE_LINE_SIZE == 64
+ /* XXX: cache line size should be 64 for POWER & PowerPC 601 */
+ glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x20UL), 0);
+ glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x24UL), 0);
+ glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x28UL), 0);
+ glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x2CUL), 0);
+ glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x30UL), 0);
+ glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x34UL), 0);
+ glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x38UL), 0);
+ glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x3CUL), 0);
+#endif
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_dcbz_64, MEMSUFFIX) (void)
{
- glue(stl, MEMSUFFIX)(T0 + 0x00, 0);
- glue(stl, MEMSUFFIX)(T0 + 0x04, 0);
- glue(stl, MEMSUFFIX)(T0 + 0x08, 0);
- glue(stl, MEMSUFFIX)(T0 + 0x0C, 0);
- glue(stl, MEMSUFFIX)(T0 + 0x10, 0);
- glue(stl, MEMSUFFIX)(T0 + 0x14, 0);
- glue(stl, MEMSUFFIX)(T0 + 0x18, 0);
- glue(stl, MEMSUFFIX)(T0 + 0x1C, 0);
+ glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x00), 0);
+ glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x04), 0);
+ glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x08), 0);
+ glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x0C), 0);
+ glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x10), 0);
+ glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x14), 0);
+ glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x18), 0);
+ glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x1C), 0);
#if DCACHE_LINE_SIZE == 64
/* XXX: cache line size should be 64 for POWER & PowerPC 601 */
- glue(stl, MEMSUFFIX)(T0 + 0x20UL, 0);
- glue(stl, MEMSUFFIX)(T0 + 0x24UL, 0);
- glue(stl, MEMSUFFIX)(T0 + 0x28UL, 0);
- glue(stl, MEMSUFFIX)(T0 + 0x2CUL, 0);
- glue(stl, MEMSUFFIX)(T0 + 0x30UL, 0);
- glue(stl, MEMSUFFIX)(T0 + 0x34UL, 0);
- glue(stl, MEMSUFFIX)(T0 + 0x38UL, 0);
- glue(stl, MEMSUFFIX)(T0 + 0x3CUL, 0);
+ glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x20UL), 0);
+ glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x24UL), 0);
+ glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x28UL), 0);
+ glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x2CUL), 0);
+ glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x30UL), 0);
+ glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x34UL), 0);
+ glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x38UL), 0);
+ glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x3CUL), 0);
#endif
RETURN();
}
+#endif
/* External access */
-PPC_OP(glue(eciwx, MEMSUFFIX))
+void OPPROTO glue(op_eciwx, MEMSUFFIX) (void)
+{
+ T1 = glue(ldl, MEMSUFFIX)((uint32_t)T0);
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_eciwx_64, MEMSUFFIX) (void)
+{
+ T1 = glue(ldl, MEMSUFFIX)((uint64_t)T0);
+ RETURN();
+}
+#endif
+
+void OPPROTO glue(op_ecowx, MEMSUFFIX) (void)
+{
+ glue(stl, MEMSUFFIX)((uint32_t)T0, T1);
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_ecowx_64, MEMSUFFIX) (void)
{
- T1 = glue(ldl, MEMSUFFIX)(T0);
+ glue(stl, MEMSUFFIX)((uint64_t)T0, T1);
RETURN();
}
+#endif
-PPC_OP(glue(ecowx, MEMSUFFIX))
+void OPPROTO glue(op_eciwx_le, MEMSUFFIX) (void)
{
- glue(stl, MEMSUFFIX)(T0, T1);
+ T1 = glue(ld32r, MEMSUFFIX)((uint32_t)T0);
RETURN();
}
-PPC_OP(glue(eciwx_le, MEMSUFFIX))
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_eciwx_le_64, MEMSUFFIX) (void)
{
- T1 = glue(ld32r, MEMSUFFIX)(T0);
+ T1 = glue(ld32r, MEMSUFFIX)((uint64_t)T0);
RETURN();
}
+#endif
-PPC_OP(glue(ecowx_le, MEMSUFFIX))
+void OPPROTO glue(op_ecowx_le, MEMSUFFIX) (void)
{
- glue(st32r, MEMSUFFIX)(T0, T1);
+ glue(st32r, MEMSUFFIX)((uint32_t)T0, T1);
RETURN();
}
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_ecowx_le_64, MEMSUFFIX) (void)
+{
+ glue(st32r, MEMSUFFIX)((uint64_t)T0, T1);
+ RETURN();
+}
+#endif
+
/* XXX: those micro-ops need tests ! */
/* PowerPC 601 specific instructions (POWER bridge) */
void OPPROTO glue(op_POWER_lscbx, MEMSUFFIX) (void)
{
/* When byte count is 0, do nothing */
- if (likely(T1 > 0)) {
+ if (likely(T1 != 0)) {
glue(do_POWER_lscbx, MEMSUFFIX)(PARAM1, PARAM2, PARAM3);
}
RETURN();
//#define PPC_DEBUG_DISAS
//#define DO_PPC_STATISTICS
-#ifdef USE_DIRECT_JUMP
+#if defined(USE_DIRECT_JUMP)
#define TBPARAM(x)
#else
#define TBPARAM(x) (long)(x)
#include "gen-op.h"
-#define GEN8(func, NAME) \
+/* Load immediate 'val' into T0.  On 64-bit targets a value with any of
+ * bits 32-63 set cannot be carried by the 32-bit micro-op parameter, so
+ * it is split across the two-parameter 64-bit setter.
+ */
+static inline void gen_set_T0 (target_ulong val)
+{
+#if defined(TARGET_PPC64)
+    if (val >> 32)
+        gen_op_set_T0_64(val >> 32, val);
+    else
+#endif
+        gen_op_set_T0(val);
+}
+
+/* Load immediate 'val' into T1; same high-half splitting as gen_set_T0 */
+static inline void gen_set_T1 (target_ulong val)
+{
+#if defined(TARGET_PPC64)
+    if (val >> 32)
+        gen_op_set_T1_64(val >> 32, val);
+    else
+#endif
+        gen_op_set_T1(val);
+}
+
+#define GEN8(func, NAME) \
static GenOpFunc *NAME ## _table [8] = { \
NAME ## 0, NAME ## 1, NAME ## 2, NAME ## 3, \
NAME ## 4, NAME ## 5, NAME ## 6, NAME ## 7, \
NAME ## _table[n](); \
}
-#define GEN32(func, NAME) \
+#define GEN32(func, NAME) \
static GenOpFunc *NAME ## _table [32] = { \
NAME ## 0, NAME ## 1, NAME ## 2, NAME ## 3, \
NAME ## 4, NAME ## 5, NAME ## 6, NAME ## 7, \
/* Translation flags */
#if !defined(CONFIG_USER_ONLY)
int supervisor;
+#endif
+#if defined(TARGET_PPC64)
+ int sf_mode;
#endif
int fpu_enabled;
ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
static inline void gen_set_Rc0 (DisasContext *ctx)
{
- gen_op_cmpi(0);
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_cmpi_64(0);
+ else
+#endif
+ gen_op_cmpi(0);
gen_op_set_Rc0();
}
+/* Record the current instruction address into env->nip, using the
+ * two-parameter 64-bit micro-op when translating in 64-bit (sf) mode.
+ */
+static inline void gen_update_nip (DisasContext *ctx, target_ulong nip)
+{
+#if defined(TARGET_PPC64)
+    if (ctx->sf_mode)
+        gen_op_update_nip_64(nip >> 32, nip);
+    else
+#endif
+        gen_op_update_nip(nip);
+}
+
#define RET_EXCP(ctx, excp, error) \
do { \
if ((ctx)->exception == EXCP_NONE) { \
- gen_op_update_nip((ctx)->nip); \
+ gen_update_nip(ctx, (ctx)->nip); \
} \
gen_op_raise_exception_err((excp), (error)); \
ctx->exception = (excp); \
/* Stop translation */
static inline void RET_STOP (DisasContext *ctx)
{
- gen_op_update_nip((ctx)->nip);
+ gen_update_nip(ctx, ctx->nip);
ctx->exception = EXCP_MTMSR;
}
/*** Instruction decoding ***/
#define EXTRACT_HELPER(name, shift, nb) \
-static inline target_ulong name (uint32_t opcode) \
+static inline uint32_t name (uint32_t opcode) \
{ \
return (opcode >> (shift)) & ((1 << (nb)) - 1); \
}
#define EXTRACT_SHELPER(name, shift, nb) \
-static inline target_long name (uint32_t opcode) \
+static inline int32_t name (uint32_t opcode) \
{ \
return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1)); \
}
#define OPC_ALIGN 4
#endif
#if defined(__APPLE__)
-#define OPCODES_SECTION \
+#define OPCODES_SECTION \
__attribute__ ((section("__TEXT,__opcodes"), unused, aligned (OPC_ALIGN) ))
#else
-#define OPCODES_SECTION \
+#define OPCODES_SECTION \
__attribute__ ((section(".opcodes"), unused, aligned (OPC_ALIGN) ))
#endif
};
/*** Integer arithmetic ***/
-#define __GEN_INT_ARITH2(name, opc1, opc2, opc3, inval) \
-GEN_HANDLER(name, opc1, opc2, opc3, inval, PPC_INTEGER) \
+#define __GEN_INT_ARITH2(name, opc1, opc2, opc3, inval, type) \
+GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \
{ \
gen_op_load_gpr_T0(rA(ctx->opcode)); \
gen_op_load_gpr_T1(rB(ctx->opcode)); \
gen_set_Rc0(ctx); \
}
-#define __GEN_INT_ARITH2_O(name, opc1, opc2, opc3, inval) \
-GEN_HANDLER(name, opc1, opc2, opc3, inval, PPC_INTEGER) \
+#define __GEN_INT_ARITH2_O(name, opc1, opc2, opc3, inval, type) \
+GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \
{ \
gen_op_load_gpr_T0(rA(ctx->opcode)); \
gen_op_load_gpr_T1(rB(ctx->opcode)); \
gen_set_Rc0(ctx); \
}
-#define __GEN_INT_ARITH1(name, opc1, opc2, opc3) \
-GEN_HANDLER(name, opc1, opc2, opc3, 0x0000F800, PPC_INTEGER) \
+#define __GEN_INT_ARITH1(name, opc1, opc2, opc3, type) \
+GEN_HANDLER(name, opc1, opc2, opc3, 0x0000F800, type) \
{ \
gen_op_load_gpr_T0(rA(ctx->opcode)); \
gen_op_##name(); \
if (unlikely(Rc(ctx->opcode) != 0)) \
gen_set_Rc0(ctx); \
}
-#define __GEN_INT_ARITH1_O(name, opc1, opc2, opc3) \
-GEN_HANDLER(name, opc1, opc2, opc3, 0x0000F800, PPC_INTEGER) \
+#define __GEN_INT_ARITH1_O(name, opc1, opc2, opc3, type) \
+GEN_HANDLER(name, opc1, opc2, opc3, 0x0000F800, type) \
{ \
gen_op_load_gpr_T0(rA(ctx->opcode)); \
gen_op_##name(); \
}
/* Two operands arithmetic functions */
-#define GEN_INT_ARITH2(name, opc1, opc2, opc3) \
-__GEN_INT_ARITH2(name, opc1, opc2, opc3, 0x00000000) \
-__GEN_INT_ARITH2_O(name##o, opc1, opc2, opc3 | 0x10, 0x00000000)
+#define GEN_INT_ARITH2(name, opc1, opc2, opc3, type) \
+__GEN_INT_ARITH2(name, opc1, opc2, opc3, 0x00000000, type) \
+__GEN_INT_ARITH2_O(name##o, opc1, opc2, opc3 | 0x10, 0x00000000, type)
+
+/* Two operands arithmetic functions with no overflow allowed */
+#define GEN_INT_ARITHN(name, opc1, opc2, opc3, type) \
+__GEN_INT_ARITH2(name, opc1, opc2, opc3, 0x00000400, type)
+
+/* One operand arithmetic functions */
+#define GEN_INT_ARITH1(name, opc1, opc2, opc3, type) \
+__GEN_INT_ARITH1(name, opc1, opc2, opc3, type) \
+__GEN_INT_ARITH1_O(name##o, opc1, opc2, opc3 | 0x10, type)
+
+#if defined(TARGET_PPC64)
+#define __GEN_INT_ARITH2_64(name, opc1, opc2, opc3, inval, type) \
+GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \
+{ \
+ gen_op_load_gpr_T0(rA(ctx->opcode)); \
+ gen_op_load_gpr_T1(rB(ctx->opcode)); \
+ if (ctx->sf_mode) \
+ gen_op_##name##_64(); \
+ else \
+ gen_op_##name(); \
+ gen_op_store_T0_gpr(rD(ctx->opcode)); \
+ if (unlikely(Rc(ctx->opcode) != 0)) \
+ gen_set_Rc0(ctx); \
+}
+
+#define __GEN_INT_ARITH2_O_64(name, opc1, opc2, opc3, inval, type) \
+GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \
+{ \
+ gen_op_load_gpr_T0(rA(ctx->opcode)); \
+ gen_op_load_gpr_T1(rB(ctx->opcode)); \
+ if (ctx->sf_mode) \
+ gen_op_##name##_64(); \
+ else \
+ gen_op_##name(); \
+ gen_op_store_T0_gpr(rD(ctx->opcode)); \
+ if (unlikely(Rc(ctx->opcode) != 0)) \
+ gen_set_Rc0(ctx); \
+}
+
+#define __GEN_INT_ARITH1_64(name, opc1, opc2, opc3, type) \
+GEN_HANDLER(name, opc1, opc2, opc3, 0x0000F800, type) \
+{ \
+ gen_op_load_gpr_T0(rA(ctx->opcode)); \
+ if (ctx->sf_mode) \
+ gen_op_##name##_64(); \
+ else \
+ gen_op_##name(); \
+ gen_op_store_T0_gpr(rD(ctx->opcode)); \
+ if (unlikely(Rc(ctx->opcode) != 0)) \
+ gen_set_Rc0(ctx); \
+}
+#define __GEN_INT_ARITH1_O_64(name, opc1, opc2, opc3, type) \
+GEN_HANDLER(name, opc1, opc2, opc3, 0x0000F800, type) \
+{ \
+ gen_op_load_gpr_T0(rA(ctx->opcode)); \
+ if (ctx->sf_mode) \
+ gen_op_##name##_64(); \
+ else \
+ gen_op_##name(); \
+ gen_op_store_T0_gpr(rD(ctx->opcode)); \
+ if (unlikely(Rc(ctx->opcode) != 0)) \
+ gen_set_Rc0(ctx); \
+}
+
+/* Two operands arithmetic functions */
+#define GEN_INT_ARITH2_64(name, opc1, opc2, opc3, type) \
+__GEN_INT_ARITH2_64(name, opc1, opc2, opc3, 0x00000000, type) \
+__GEN_INT_ARITH2_O_64(name##o, opc1, opc2, opc3 | 0x10, 0x00000000, type)
/* Two operands arithmetic functions with no overflow allowed */
-#define GEN_INT_ARITHN(name, opc1, opc2, opc3) \
-__GEN_INT_ARITH2(name, opc1, opc2, opc3, 0x00000400)
+#define GEN_INT_ARITHN_64(name, opc1, opc2, opc3, type) \
+__GEN_INT_ARITH2_64(name, opc1, opc2, opc3, 0x00000400, type)
/* One operand arithmetic functions */
-#define GEN_INT_ARITH1(name, opc1, opc2, opc3) \
-__GEN_INT_ARITH1(name, opc1, opc2, opc3) \
-__GEN_INT_ARITH1_O(name##o, opc1, opc2, opc3 | 0x10)
+#define GEN_INT_ARITH1_64(name, opc1, opc2, opc3, type) \
+__GEN_INT_ARITH1_64(name, opc1, opc2, opc3, type) \
+__GEN_INT_ARITH1_O_64(name##o, opc1, opc2, opc3 | 0x10, type)
+#else
+#define GEN_INT_ARITH2_64 GEN_INT_ARITH2
+#define GEN_INT_ARITHN_64 GEN_INT_ARITHN
+#define GEN_INT_ARITH1_64 GEN_INT_ARITH1
+#endif
/* add add. addo addo. */
-GEN_INT_ARITH2 (add, 0x1F, 0x0A, 0x08);
+static inline void gen_op_addo (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_add();
+ gen_op_check_addo();
+}
+#if defined(TARGET_PPC64)
+#define gen_op_add_64 gen_op_add
+static inline void gen_op_addo_64 (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_add();
+ gen_op_check_addo_64();
+}
+#endif
+GEN_INT_ARITH2_64 (add, 0x1F, 0x0A, 0x08, PPC_INTEGER);
/* addc addc. addco addco. */
-GEN_INT_ARITH2 (addc, 0x1F, 0x0A, 0x00);
+static inline void gen_op_addc (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_add();
+ gen_op_check_addc();
+}
+static inline void gen_op_addco (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_add();
+ gen_op_check_addc();
+ gen_op_check_addo();
+}
+#if defined(TARGET_PPC64)
+static inline void gen_op_addc_64 (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_add();
+ gen_op_check_addc_64();
+}
+static inline void gen_op_addco_64 (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_add();
+ gen_op_check_addc_64();
+ gen_op_check_addo_64();
+}
+#endif
+GEN_INT_ARITH2_64 (addc, 0x1F, 0x0A, 0x00, PPC_INTEGER);
/* adde adde. addeo addeo. */
-GEN_INT_ARITH2 (adde, 0x1F, 0x0A, 0x04);
+static inline void gen_op_addeo (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_adde();
+ gen_op_check_addo();
+}
+#if defined(TARGET_PPC64)
+static inline void gen_op_addeo_64 (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_adde_64();
+ gen_op_check_addo_64();
+}
+#endif
+GEN_INT_ARITH2_64 (adde, 0x1F, 0x0A, 0x04, PPC_INTEGER);
/* addme addme. addmeo addmeo. */
-GEN_INT_ARITH1 (addme, 0x1F, 0x0A, 0x07);
+static inline void gen_op_addme (void)
+{
+ gen_op_move_T1_T0();
+ gen_op_add_me();
+}
+#if defined(TARGET_PPC64)
+static inline void gen_op_addme_64 (void)
+{
+ gen_op_move_T1_T0();
+ gen_op_add_me_64();
+}
+#endif
+GEN_INT_ARITH1_64 (addme, 0x1F, 0x0A, 0x07, PPC_INTEGER);
/* addze addze. addzeo addzeo. */
-GEN_INT_ARITH1 (addze, 0x1F, 0x0A, 0x06);
+static inline void gen_op_addze (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_add_ze();
+ gen_op_check_addc();
+}
+static inline void gen_op_addzeo (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_add_ze();
+ gen_op_check_addc();
+ gen_op_check_addo();
+}
+#if defined(TARGET_PPC64)
+static inline void gen_op_addze_64 (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_add_ze();
+ gen_op_check_addc_64();
+}
+static inline void gen_op_addzeo_64 (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_add_ze();
+ gen_op_check_addc_64();
+ gen_op_check_addo_64();
+}
+#endif
+GEN_INT_ARITH1_64 (addze, 0x1F, 0x0A, 0x06, PPC_INTEGER);
/* divw divw. divwo divwo. */
-GEN_INT_ARITH2 (divw, 0x1F, 0x0B, 0x0F);
+GEN_INT_ARITH2 (divw, 0x1F, 0x0B, 0x0F, PPC_INTEGER);
/* divwu divwu. divwuo divwuo. */
-GEN_INT_ARITH2 (divwu, 0x1F, 0x0B, 0x0E);
+GEN_INT_ARITH2 (divwu, 0x1F, 0x0B, 0x0E, PPC_INTEGER);
/* mulhw mulhw. */
-GEN_INT_ARITHN (mulhw, 0x1F, 0x0B, 0x02);
+GEN_INT_ARITHN (mulhw, 0x1F, 0x0B, 0x02, PPC_INTEGER);
/* mulhwu mulhwu. */
-GEN_INT_ARITHN (mulhwu, 0x1F, 0x0B, 0x00);
+GEN_INT_ARITHN (mulhwu, 0x1F, 0x0B, 0x00, PPC_INTEGER);
/* mullw mullw. mullwo mullwo. */
-GEN_INT_ARITH2 (mullw, 0x1F, 0x0B, 0x07);
+GEN_INT_ARITH2 (mullw, 0x1F, 0x0B, 0x07, PPC_INTEGER);
/* neg neg. nego nego. */
-GEN_INT_ARITH1 (neg, 0x1F, 0x08, 0x03);
+GEN_INT_ARITH1_64 (neg, 0x1F, 0x08, 0x03, PPC_INTEGER);
/* subf subf. subfo subfo. */
-GEN_INT_ARITH2 (subf, 0x1F, 0x08, 0x01);
+static inline void gen_op_subfo (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_subf();
+ gen_op_check_subfo();
+}
+#if defined(TARGET_PPC64)
+#define gen_op_subf_64 gen_op_subf
+static inline void gen_op_subfo_64 (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_subf();
+ gen_op_check_subfo_64();
+}
+#endif
+GEN_INT_ARITH2_64 (subf, 0x1F, 0x08, 0x01, PPC_INTEGER);
/* subfc subfc. subfco subfco. */
-GEN_INT_ARITH2 (subfc, 0x1F, 0x08, 0x00);
+static inline void gen_op_subfc (void)
+{
+ gen_op_subf();
+ gen_op_check_subfc();
+}
+static inline void gen_op_subfco (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_subf();
+ gen_op_check_subfc();
+ gen_op_check_subfo();
+}
+#if defined(TARGET_PPC64)
+static inline void gen_op_subfc_64 (void)
+{
+ gen_op_subf();
+ gen_op_check_subfc_64();
+}
+static inline void gen_op_subfco_64 (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_subf();
+ gen_op_check_subfc_64();
+ gen_op_check_subfo_64();
+}
+#endif
+GEN_INT_ARITH2_64 (subfc, 0x1F, 0x08, 0x00, PPC_INTEGER);
/* subfe subfe. subfeo subfeo. */
-GEN_INT_ARITH2 (subfe, 0x1F, 0x08, 0x04);
+static inline void gen_op_subfeo (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_subfe();
+ gen_op_check_subfo();
+}
+#if defined(TARGET_PPC64)
+#define gen_op_subfe_64 gen_op_subfe
+static inline void gen_op_subfeo_64 (void)
+{
+ gen_op_move_T2_T0();
+ gen_op_subfe_64();
+ gen_op_check_subfo_64();
+}
+#endif
+GEN_INT_ARITH2_64 (subfe, 0x1F, 0x08, 0x04, PPC_INTEGER);
/* subfme subfme. subfmeo subfmeo. */
-GEN_INT_ARITH1 (subfme, 0x1F, 0x08, 0x07);
+GEN_INT_ARITH1_64 (subfme, 0x1F, 0x08, 0x07, PPC_INTEGER);
/* subfze subfze. subfzeo subfzeo. */
-GEN_INT_ARITH1 (subfze, 0x1F, 0x08, 0x06);
+GEN_INT_ARITH1_64 (subfze, 0x1F, 0x08, 0x06, PPC_INTEGER);
/* addi */
GEN_HANDLER(addi, 0x0E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
{
if (rA(ctx->opcode) == 0) {
/* li case */
- gen_op_set_T0(simm);
+ gen_set_T0(simm);
} else {
gen_op_load_gpr_T0(rA(ctx->opcode));
if (likely(simm != 0))
target_long simm = SIMM(ctx->opcode);
gen_op_load_gpr_T0(rA(ctx->opcode));
- if (likely(simm != 0))
- gen_op_addic(SIMM(ctx->opcode));
+ if (likely(simm != 0)) {
+ gen_op_move_T2_T0();
+ gen_op_addi(simm);
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_check_addc_64();
+ else
+#endif
+ gen_op_check_addc();
+ }
gen_op_store_T0_gpr(rD(ctx->opcode));
}
/* addic. */
target_long simm = SIMM(ctx->opcode);
gen_op_load_gpr_T0(rA(ctx->opcode));
- if (likely(simm != 0))
- gen_op_addic(SIMM(ctx->opcode));
+ if (likely(simm != 0)) {
+ gen_op_move_T2_T0();
+ gen_op_addi(simm);
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_check_addc_64();
+ else
+#endif
+ gen_op_check_addc();
+ }
gen_op_store_T0_gpr(rD(ctx->opcode));
gen_set_Rc0(ctx);
}
if (rA(ctx->opcode) == 0) {
/* lis case */
- gen_op_set_T0(simm << 16);
+ gen_set_T0(simm << 16);
} else {
gen_op_load_gpr_T0(rA(ctx->opcode));
if (likely(simm != 0))
GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
{
gen_op_load_gpr_T0(rA(ctx->opcode));
- gen_op_subfic(SIMM(ctx->opcode));
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_subfic_64(SIMM(ctx->opcode));
+ else
+#endif
+ gen_op_subfic(SIMM(ctx->opcode));
gen_op_store_T0_gpr(rD(ctx->opcode));
}
+#if defined(TARGET_PPC64)
+/* mulhd mulhd. */
+GEN_INT_ARITHN (mulhd, 0x1F, 0x09, 0x02, PPC_INTEGER);
+/* mulhdu mulhdu. */
+GEN_INT_ARITHN (mulhdu, 0x1F, 0x09, 0x00, PPC_INTEGER);
+/* mulld mulld. mulldo mulldo. */
+GEN_INT_ARITH2 (mulld, 0x1F, 0x09, 0x07, PPC_INTEGER);
+/* divd divd. divdo divdo. */
+GEN_INT_ARITH2 (divd, 0x1F, 0x09, 0x0F, PPC_INTEGER);
+/* divdu divdu. divduo divduo. */
+GEN_INT_ARITH2 (divdu, 0x1F, 0x09, 0x0E, PPC_INTEGER);
+#endif
+
/*** Integer comparison ***/
-#define GEN_CMP(name, opc) \
-GEN_HANDLER(name, 0x1F, 0x00, opc, 0x00400000, PPC_INTEGER) \
+#if defined(TARGET_PPC64)
+#define GEN_CMP(name, opc, type) \
+GEN_HANDLER(name, 0x1F, 0x00, opc, 0x00400000, type) \
+{ \
+ gen_op_load_gpr_T0(rA(ctx->opcode)); \
+ gen_op_load_gpr_T1(rB(ctx->opcode)); \
+ if (ctx->sf_mode) \
+ gen_op_##name##_64(); \
+ else \
+ gen_op_##name(); \
+ gen_op_store_T0_crf(crfD(ctx->opcode)); \
+}
+#else
+#define GEN_CMP(name, opc, type) \
+GEN_HANDLER(name, 0x1F, 0x00, opc, 0x00400000, type) \
{ \
gen_op_load_gpr_T0(rA(ctx->opcode)); \
gen_op_load_gpr_T1(rB(ctx->opcode)); \
gen_op_##name(); \
gen_op_store_T0_crf(crfD(ctx->opcode)); \
}
+#endif
/* cmp */
-GEN_CMP(cmp, 0x00);
+GEN_CMP(cmp, 0x00, PPC_INTEGER);
/* cmpi */
GEN_HANDLER(cmpi, 0x0B, 0xFF, 0xFF, 0x00400000, PPC_INTEGER)
{
gen_op_load_gpr_T0(rA(ctx->opcode));
- gen_op_cmpi(SIMM(ctx->opcode));
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_cmpi_64(SIMM(ctx->opcode));
+ else
+#endif
+ gen_op_cmpi(SIMM(ctx->opcode));
gen_op_store_T0_crf(crfD(ctx->opcode));
}
/* cmpl */
-GEN_CMP(cmpl, 0x01);
+GEN_CMP(cmpl, 0x01, PPC_INTEGER);
/* cmpli */
GEN_HANDLER(cmpli, 0x0A, 0xFF, 0xFF, 0x00400000, PPC_INTEGER)
{
gen_op_load_gpr_T0(rA(ctx->opcode));
- gen_op_cmpli(UIMM(ctx->opcode));
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_cmpli_64(UIMM(ctx->opcode));
+ else
+#endif
+ gen_op_cmpli(UIMM(ctx->opcode));
gen_op_store_T0_crf(crfD(ctx->opcode));
}
+/* isel (PowerPC 2.03 specification) */
+GEN_HANDLER(isel, 0x1F, 0x0F, 0x00, 0x00000001, PPC_203)
+{
+    uint32_t bi = rC(ctx->opcode);
+    uint32_t mask;
+
+    /* op_isel picks T1 (condition true) or T2 (condition false), and T0
+     * is clobbered below by the CRF load, so the rA == 0 case must place
+     * its zero operand in T1, matching the non-zero branch.
+     */
+    if (rA(ctx->opcode) == 0) {
+        gen_set_T1(0);
+    } else {
+        gen_op_load_gpr_T1(rA(ctx->opcode));
+    }
+    gen_op_load_gpr_T2(rB(ctx->opcode));
+    mask = 1 << (3 - (bi & 0x03));
+    gen_op_load_crf_T0(bi >> 2);
+    gen_op_test_true(mask);
+    gen_op_isel();
+    gen_op_store_T0_gpr(rD(ctx->opcode));
+}
+
/*** Integer logical ***/
-#define __GEN_LOGICAL2(name, opc2, opc3) \
-GEN_HANDLER(name, 0x1F, opc2, opc3, 0x00000000, PPC_INTEGER) \
+#define __GEN_LOGICAL2(name, opc2, opc3, type) \
+GEN_HANDLER(name, 0x1F, opc2, opc3, 0x00000000, type) \
{ \
gen_op_load_gpr_T0(rS(ctx->opcode)); \
gen_op_load_gpr_T1(rB(ctx->opcode)); \
if (unlikely(Rc(ctx->opcode) != 0)) \
gen_set_Rc0(ctx); \
}
-#define GEN_LOGICAL2(name, opc) \
-__GEN_LOGICAL2(name, 0x1C, opc)
+#define GEN_LOGICAL2(name, opc, type) \
+__GEN_LOGICAL2(name, 0x1C, opc, type)
-#define GEN_LOGICAL1(name, opc) \
-GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, PPC_INTEGER) \
+#define GEN_LOGICAL1(name, opc, type) \
+GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, type) \
{ \
gen_op_load_gpr_T0(rS(ctx->opcode)); \
gen_op_##name(); \
}
/* and & and. */
-GEN_LOGICAL2(and, 0x00);
+GEN_LOGICAL2(and, 0x00, PPC_INTEGER);
/* andc & andc. */
-GEN_LOGICAL2(andc, 0x01);
+GEN_LOGICAL2(andc, 0x01, PPC_INTEGER);
/* andi. */
GEN_HANDLER(andi_, 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
{
}
/* cntlzw */
-GEN_LOGICAL1(cntlzw, 0x00);
+GEN_LOGICAL1(cntlzw, 0x00, PPC_INTEGER);
/* eqv & eqv. */
-GEN_LOGICAL2(eqv, 0x08);
+GEN_LOGICAL2(eqv, 0x08, PPC_INTEGER);
/* extsb & extsb. */
-GEN_LOGICAL1(extsb, 0x1D);
+GEN_LOGICAL1(extsb, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
-GEN_LOGICAL1(extsh, 0x1C);
+GEN_LOGICAL1(extsh, 0x1C, PPC_INTEGER);
/* nand & nand. */
-GEN_LOGICAL2(nand, 0x0E);
+GEN_LOGICAL2(nand, 0x0E, PPC_INTEGER);
/* nor & nor. */
-GEN_LOGICAL2(nor, 0x03);
+GEN_LOGICAL2(nor, 0x03, PPC_INTEGER);
/* or & or. */
GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER)
}
/* orc & orc. */
-GEN_LOGICAL2(orc, 0x0C);
+GEN_LOGICAL2(orc, 0x0C, PPC_INTEGER);
/* xor & xor. */
GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER)
{
gen_op_store_T0_gpr(rA(ctx->opcode));
}
+/* popcntb : PowerPC 2.03 specification */
+GEN_HANDLER(popcntb, 0x1F, 0x03, 0x03, 0x0000F801, PPC_203)
+{
+ gen_op_load_gpr_T0(rS(ctx->opcode));
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_popcntb_64();
+ else
+#endif
+ gen_op_popcntb();
+ gen_op_store_T0_gpr(rA(ctx->opcode));
+}
+
+#if defined(TARGET_PPC64)
+/* extsw & extsw. */
+GEN_LOGICAL1(extsw, 0x1E, PPC_64B);
+/* cntlzd */
+GEN_LOGICAL1(cntlzd, 0x01, PPC_64B);
+#endif
+
/*** Integer rotate ***/
/* rlwimi & rlwimi. */
GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
gen_set_Rc0(ctx);
}
+#if defined(TARGET_PPC64)
+#define GEN_PPC64_R2(name, opc1, opc2) \
+GEN_HANDLER(name##0, opc1, opc2, 0xFF, 0x00000000, PPC_64B) \
+{ \
+ gen_##name(ctx, 0); \
+} \
+GEN_HANDLER(name##1, opc1, opc2 | 0x10, 0xFF, 0x00000000, PPC_64B) \
+{ \
+ gen_##name(ctx, 1); \
+}
+#define GEN_PPC64_R4(name, opc1, opc2) \
+GEN_HANDLER(name##0, opc1, opc2, 0xFF, 0x00000000, PPC_64B) \
+{ \
+ gen_##name(ctx, 0, 0); \
+} \
+GEN_HANDLER(name##1, opc1, opc2 | 0x01, 0xFF, 0x00000000, PPC_64B) \
+{ \
+ gen_##name(ctx, 0, 1); \
+} \
+GEN_HANDLER(name##2, opc1, opc2 | 0x10, 0xFF, 0x00000000, PPC_64B) \
+{ \
+ gen_##name(ctx, 1, 0); \
+} \
+GEN_HANDLER(name##3, opc1, opc2 | 0x11, 0xFF, 0x00000000, PPC_64B) \
+{ \
+ gen_##name(ctx, 1, 1); \
+}
+/* rldicl - rldicl. */
+/* rldicl: MD-form split fields — bit 5 of sh comes from shn, bit 5 of mb
+ * from mbn, so compose as field | (bit << 5) (cf. gen_sradi below).
+ */
+static inline void gen_rldicl (DisasContext *ctx, int mbn, int shn)
+{
+    int sh, mb;
+
+    sh = SH(ctx->opcode) | (shn << 5);
+    mb = MB(ctx->opcode) | (mbn << 5);
+    /* XXX: TODO */
+    RET_INVAL(ctx);
+}
+GEN_PPC64_R4(rldicl, 0x1E, 0x00)
+/* rldicr - rldicr. */
+/* rldicr: men supplies bit 5 of the 6-bit mask-end field */
+static inline void gen_rldicr (DisasContext *ctx, int men, int shn)
+{
+    int sh, me;
+
+    sh = SH(ctx->opcode) | (shn << 5);
+    me = MB(ctx->opcode) | (men << 5);
+    /* XXX: TODO */
+    RET_INVAL(ctx);
+}
+GEN_PPC64_R4(rldicr, 0x1E, 0x02)
+/* rldic - rldic. */
+/* rldic: same MD-form split-field composition as rldicl */
+static inline void gen_rldic (DisasContext *ctx, int mbn, int shn)
+{
+    int sh, mb;
+
+    sh = SH(ctx->opcode) | (shn << 5);
+    mb = MB(ctx->opcode) | (mbn << 5);
+    /* XXX: TODO */
+    RET_INVAL(ctx);
+}
+GEN_PPC64_R4(rldic, 0x1E, 0x04)
+/* rldcl - rldcl. */
+/* rldcl: mbn supplies bit 5 of the 6-bit mask-begin field */
+static inline void gen_rldcl (DisasContext *ctx, int mbn)
+{
+    int mb;
+
+    mb = MB(ctx->opcode) | (mbn << 5);
+    /* XXX: TODO */
+    RET_INVAL(ctx);
+}
+GEN_PPC64_R2(rldcl, 0x1E, 0x08)
+/* rldcr - rldcr. */
+/* rldcr: men supplies bit 5 of the 6-bit mask-end field */
+static inline void gen_rldcr (DisasContext *ctx, int men)
+{
+    int me;
+
+    me = MB(ctx->opcode) | (men << 5);
+    /* XXX: TODO */
+    RET_INVAL(ctx);
+}
+GEN_PPC64_R2(rldcr, 0x1E, 0x09)
+/* rldimi - rldimi. */
+/* rldimi: same MD-form split-field composition as rldicl */
+static inline void gen_rldimi (DisasContext *ctx, int mbn, int shn)
+{
+    int sh, mb;
+
+    sh = SH(ctx->opcode) | (shn << 5);
+    mb = MB(ctx->opcode) | (mbn << 5);
+    /* XXX: TODO */
+    RET_INVAL(ctx);
+}
+GEN_PPC64_R4(rldimi, 0x1E, 0x06)
+#endif
+
/*** Integer shift ***/
/* slw & slw. */
-__GEN_LOGICAL2(slw, 0x18, 0x00);
+__GEN_LOGICAL2(slw, 0x18, 0x00, PPC_INTEGER);
/* sraw & sraw. */
-__GEN_LOGICAL2(sraw, 0x18, 0x18);
+__GEN_LOGICAL2(sraw, 0x18, 0x18, PPC_INTEGER);
/* srawi & srawi. */
GEN_HANDLER(srawi, 0x1F, 0x18, 0x19, 0x00000000, PPC_INTEGER)
{
+ int mb, me;
gen_op_load_gpr_T0(rS(ctx->opcode));
- if (SH(ctx->opcode) != 0)
- gen_op_srawi(SH(ctx->opcode), MASK(32 - SH(ctx->opcode), 31));
+ if (SH(ctx->opcode) != 0) {
+ gen_op_move_T1_T0();
+ mb = 32 - SH(ctx->opcode);
+ me = 31;
+#if defined(TARGET_PPC64)
+ mb += 32;
+ me += 32;
+#endif
+ gen_op_srawi(SH(ctx->opcode), MASK(mb, me));
+ }
gen_op_store_T0_gpr(rA(ctx->opcode));
if (unlikely(Rc(ctx->opcode) != 0))
gen_set_Rc0(ctx);
}
/* srw & srw. */
-__GEN_LOGICAL2(srw, 0x18, 0x10);
+__GEN_LOGICAL2(srw, 0x18, 0x10, PPC_INTEGER);
+
+#if defined(TARGET_PPC64)
+/* sld & sld. */
+__GEN_LOGICAL2(sld, 0x1B, 0x00, PPC_64B);
+/* srad & srad. */
+__GEN_LOGICAL2(srad, 0x1A, 0x18, PPC_64B);
+/* sradi & sradi. */
+/* sradi: shift right algebraic doubleword immediate.  'n' supplies bit 5
+ * of the 6-bit shift count (split XS-form encoding).
+ */
+static inline void gen_sradi (DisasContext *ctx, int n)
+{
+    uint64_t mask;
+    int sh, mb, me;
+
+    gen_op_load_gpr_T0(rS(ctx->opcode));
+    sh = SH(ctx->opcode) + (n << 5);
+    if (sh != 0) {
+        gen_op_move_T1_T0();
+        /* The carry mask must cover all sh shifted-out bits, including
+         * the high shift bit from n, so use the full sh, not SH() alone.
+         */
+        mb = 64 - sh;
+        me = 63;
+        mask = MASK(mb, me);
+        gen_op_sradi(sh, mask >> 32, mask);
+    }
+    gen_op_store_T0_gpr(rA(ctx->opcode));
+    if (unlikely(Rc(ctx->opcode) != 0))
+        gen_set_Rc0(ctx);
+}
+GEN_HANDLER(sradi0, 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B)
+{
+ gen_sradi(ctx, 0);
+}
+GEN_HANDLER(sradi1, 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B)
+{
+ gen_sradi(ctx, 1);
+}
+/* srd & srd. */
+__GEN_LOGICAL2(srd, 0x1B, 0x10, PPC_64B);
+#endif
/*** Floating-Point arithmetic ***/
#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat) \
target_long simm = SIMM(ctx->opcode);
if (rA(ctx->opcode) == 0) {
- gen_op_set_T0(simm);
+ gen_set_T0(simm);
} else {
gen_op_load_gpr_T0(rA(ctx->opcode));
if (likely(simm != 0))
/*** Integer load ***/
#define op_ldst(name) (*gen_op_##name[ctx->mem_idx])()
#if defined(CONFIG_USER_ONLY)
+#if defined(TARGET_PPC64)
#define OP_LD_TABLE(width) \
static GenOpFunc *gen_op_l##width[] = { \
&gen_op_l##width##_raw, \
&gen_op_l##width##_le_raw, \
+ &gen_op_l##width##_64_raw, \
+ &gen_op_l##width##_le_64_raw, \
};
#define OP_ST_TABLE(width) \
static GenOpFunc *gen_op_st##width[] = { \
&gen_op_st##width##_raw, \
&gen_op_st##width##_le_raw, \
+ &gen_op_st##width##_64_raw, \
+ &gen_op_st##width##_le_64_raw, \
};
/* Byte access routine are endian safe */
+#define gen_op_stb_le_64_raw gen_op_stb_64_raw
+#define gen_op_lbz_le_64_raw gen_op_lbz_64_raw
+#else
+#define OP_LD_TABLE(width) \
+static GenOpFunc *gen_op_l##width[] = { \
+ &gen_op_l##width##_raw, \
+ &gen_op_l##width##_le_raw, \
+};
+#define OP_ST_TABLE(width) \
+static GenOpFunc *gen_op_st##width[] = { \
+ &gen_op_st##width##_raw, \
+ &gen_op_st##width##_le_raw, \
+};
+#endif
+/* Byte access routine are endian safe */
#define gen_op_stb_le_raw gen_op_stb_raw
#define gen_op_lbz_le_raw gen_op_lbz_raw
#else
+#if defined(TARGET_PPC64)
#define OP_LD_TABLE(width) \
static GenOpFunc *gen_op_l##width[] = { \
&gen_op_l##width##_user, \
&gen_op_l##width##_le_user, \
&gen_op_l##width##_kernel, \
&gen_op_l##width##_le_kernel, \
+ &gen_op_l##width##_64_user, \
+ &gen_op_l##width##_le_64_user, \
+ &gen_op_l##width##_64_kernel, \
+ &gen_op_l##width##_le_64_kernel, \
};
#define OP_ST_TABLE(width) \
static GenOpFunc *gen_op_st##width[] = { \
&gen_op_st##width##_le_user, \
&gen_op_st##width##_kernel, \
&gen_op_st##width##_le_kernel, \
+ &gen_op_st##width##_64_user, \
+ &gen_op_st##width##_le_64_user, \
+ &gen_op_st##width##_64_kernel, \
+ &gen_op_st##width##_le_64_kernel, \
};
/* Byte access routine are endian safe */
+#define gen_op_stb_le_64_user gen_op_stb_64_user
+#define gen_op_lbz_le_64_user gen_op_lbz_64_user
+#define gen_op_stb_le_64_kernel gen_op_stb_64_kernel
+#define gen_op_lbz_le_64_kernel gen_op_lbz_64_kernel
+#else
+#define OP_LD_TABLE(width) \
+static GenOpFunc *gen_op_l##width[] = { \
+ &gen_op_l##width##_user, \
+ &gen_op_l##width##_le_user, \
+ &gen_op_l##width##_kernel, \
+ &gen_op_l##width##_le_kernel, \
+};
+#define OP_ST_TABLE(width) \
+static GenOpFunc *gen_op_st##width[] = { \
+ &gen_op_st##width##_user, \
+ &gen_op_st##width##_le_user, \
+ &gen_op_st##width##_kernel, \
+ &gen_op_st##width##_le_kernel, \
+};
+#endif
+/* Byte access routine are endian safe */
#define gen_op_stb_le_user gen_op_stb_user
#define gen_op_lbz_le_user gen_op_lbz_user
#define gen_op_stb_le_kernel gen_op_stb_kernel
#define gen_op_lbz_le_kernel gen_op_lbz_kernel
#endif
-#define GEN_LD(width, opc) \
-GEN_HANDLER(l##width, opc, 0xFF, 0xFF, 0x00000000, PPC_INTEGER) \
+#define GEN_LD(width, opc, type) \
+GEN_HANDLER(l##width, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
gen_addr_imm_index(ctx); \
op_ldst(l##width); \
gen_op_store_T1_gpr(rD(ctx->opcode)); \
}
-#define GEN_LDU(width, opc) \
-GEN_HANDLER(l##width##u, opc, 0xFF, 0xFF, 0x00000000, PPC_INTEGER) \
+#define GEN_LDU(width, opc, type) \
+GEN_HANDLER(l##width##u, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
if (unlikely(rA(ctx->opcode) == 0 || \
rA(ctx->opcode) == rD(ctx->opcode))) { \
gen_op_store_T0_gpr(rA(ctx->opcode)); \
}
-#define GEN_LDUX(width, opc) \
-GEN_HANDLER(l##width##ux, 0x1F, 0x17, opc, 0x00000001, PPC_INTEGER) \
+#define GEN_LDUX(width, opc2, opc3, type) \
+GEN_HANDLER(l##width##ux, 0x1F, opc2, opc3, 0x00000001, type) \
{ \
if (unlikely(rA(ctx->opcode) == 0 || \
rA(ctx->opcode) == rD(ctx->opcode))) { \
gen_op_store_T0_gpr(rA(ctx->opcode)); \
}
-#define GEN_LDX(width, opc2, opc3) \
-GEN_HANDLER(l##width##x, 0x1F, opc2, opc3, 0x00000001, PPC_INTEGER) \
+#define GEN_LDX(width, opc2, opc3, type) \
+GEN_HANDLER(l##width##x, 0x1F, opc2, opc3, 0x00000001, type) \
{ \
gen_addr_reg_index(ctx); \
op_ldst(l##width); \
gen_op_store_T1_gpr(rD(ctx->opcode)); \
}
-#define GEN_LDS(width, op) \
+#define GEN_LDS(width, op, type) \
OP_LD_TABLE(width); \
-GEN_LD(width, op | 0x20); \
-GEN_LDU(width, op | 0x21); \
-GEN_LDUX(width, op | 0x01); \
-GEN_LDX(width, 0x17, op | 0x00)
+GEN_LD(width, op | 0x20, type); \
+GEN_LDU(width, op | 0x21, type); \
+GEN_LDUX(width, 0x17, op | 0x01, type); \
+GEN_LDX(width, 0x17, op | 0x00, type)
/* lbz lbzu lbzux lbzx */
-GEN_LDS(bz, 0x02);
+GEN_LDS(bz, 0x02, PPC_INTEGER);
/* lha lhau lhaux lhax */
-GEN_LDS(ha, 0x0A);
+GEN_LDS(ha, 0x0A, PPC_INTEGER);
/* lhz lhzu lhzux lhzx */
-GEN_LDS(hz, 0x08);
+GEN_LDS(hz, 0x08, PPC_INTEGER);
/* lwz lwzu lwzux lwzx */
-GEN_LDS(wz, 0x00);
+GEN_LDS(wz, 0x00, PPC_INTEGER);
+#if defined(TARGET_PPC64)
+OP_LD_TABLE(wa);
+OP_LD_TABLE(d);
+/* lwaux */
+GEN_LDUX(wa, 0x15, 0x0B, PPC_64B);
+/* lwax */
+GEN_LDX(wa, 0x15, 0x0A, PPC_64B);
+/* ldux */
+GEN_LDUX(d, 0x15, 0x01, PPC_64B);
+/* ldx */
+GEN_LDX(d, 0x15, 0x00, PPC_64B);
+/* ld / ldu / lwa: DS-form loads. Opcode bit 0 (read via the Rc macro)
+ * selects the update form; opcode bit 1 selects lwa.
+ */
+GEN_HANDLER(ld, 0x3A, 0xFF, 0xFF, 0x00000000, PPC_64B)
+{
+ if (Rc(ctx->opcode)) {
+ /* Update forms with rA == 0 or rA == rD are invalid */
+ if (unlikely(rA(ctx->opcode) == 0 ||
+ rA(ctx->opcode) == rD(ctx->opcode))) {
+ RET_INVAL(ctx);
+ return;
+ }
+ }
+ gen_addr_imm_index(ctx);
+ if (ctx->opcode & 0x02) {
+ /* lwa (lwau is undefined) */
+ op_ldst(lwa);
+ } else {
+ /* ld - ldu */
+ op_ldst(ld);
+ }
+ gen_op_store_T1_gpr(rD(ctx->opcode));
+ if (Rc(ctx->opcode))
+ /* update form: write back to rA (presumably T0 still holds the
+  * EA computed by gen_addr_imm_index — TODO confirm) */
+ gen_op_store_T0_gpr(rA(ctx->opcode));
+}
+#endif
/*** Integer store ***/
-#define GEN_ST(width, opc) \
-GEN_HANDLER(st##width, opc, 0xFF, 0xFF, 0x00000000, PPC_INTEGER) \
+#define GEN_ST(width, opc, type) \
+GEN_HANDLER(st##width, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
gen_addr_imm_index(ctx); \
gen_op_load_gpr_T1(rS(ctx->opcode)); \
op_ldst(st##width); \
}
-#define GEN_STU(width, opc) \
-GEN_HANDLER(st##width##u, opc, 0xFF, 0xFF, 0x00000000, PPC_INTEGER) \
+#define GEN_STU(width, opc, type) \
+GEN_HANDLER(st##width##u, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
if (unlikely(rA(ctx->opcode) == 0)) { \
RET_INVAL(ctx); \
gen_op_store_T0_gpr(rA(ctx->opcode)); \
}
-#define GEN_STUX(width, opc) \
-GEN_HANDLER(st##width##ux, 0x1F, 0x17, opc, 0x00000001, PPC_INTEGER) \
+#define GEN_STUX(width, opc2, opc3, type) \
+GEN_HANDLER(st##width##ux, 0x1F, opc2, opc3, 0x00000001, type) \
{ \
if (unlikely(rA(ctx->opcode) == 0)) { \
RET_INVAL(ctx); \
gen_op_store_T0_gpr(rA(ctx->opcode)); \
}
-#define GEN_STX(width, opc2, opc3) \
-GEN_HANDLER(st##width##x, 0x1F, opc2, opc3, 0x00000001, PPC_INTEGER) \
+#define GEN_STX(width, opc2, opc3, type) \
+GEN_HANDLER(st##width##x, 0x1F, opc2, opc3, 0x00000001, type) \
{ \
gen_addr_reg_index(ctx); \
gen_op_load_gpr_T1(rS(ctx->opcode)); \
op_ldst(st##width); \
}
-#define GEN_STS(width, op) \
+#define GEN_STS(width, op, type) \
OP_ST_TABLE(width); \
-GEN_ST(width, op | 0x20); \
-GEN_STU(width, op | 0x21); \
-GEN_STUX(width, op | 0x01); \
-GEN_STX(width, 0x17, op | 0x00)
+GEN_ST(width, op | 0x20, type); \
+GEN_STU(width, op | 0x21, type); \
+GEN_STUX(width, 0x17, op | 0x01, type); \
+GEN_STX(width, 0x17, op | 0x00, type)
/* stb stbu stbux stbx */
-GEN_STS(b, 0x06);
+GEN_STS(b, 0x06, PPC_INTEGER);
/* sth sthu sthux sthx */
-GEN_STS(h, 0x0C);
+GEN_STS(h, 0x0C, PPC_INTEGER);
/* stw stwu stwux stwx */
-GEN_STS(w, 0x04);
-
+GEN_STS(w, 0x04, PPC_INTEGER);
+#if defined(TARGET_PPC64)
+OP_ST_TABLE(d);
+GEN_STUX(d, 0x15, 0x01, PPC_64B);
+GEN_STX(d, 0x15, 0x00, PPC_64B);
+/* std / stdu: DS-form stores. Opcode bit 0 (read via the Rc macro)
+ * selects the update form; bit 1 is rejected by the 0x2 inval mask.
+ */
+GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000002, PPC_64B)
+{
+ if (Rc(ctx->opcode)) {
+ /* stdu with rA == 0 is an invalid form */
+ if (unlikely(rA(ctx->opcode) == 0)) {
+ RET_INVAL(ctx);
+ return;
+ }
+ }
+ gen_addr_imm_index(ctx);
+ gen_op_load_gpr_T1(rS(ctx->opcode));
+ op_ldst(std);
+ if (Rc(ctx->opcode))
+ /* update form: write back to rA (presumably T0 still holds the
+  * EA computed by gen_addr_imm_index — TODO confirm) */
+ gen_op_store_T0_gpr(rA(ctx->opcode));
+}
+#endif
/*** Integer load and store with byte reverse ***/
/* lhbrx */
OP_LD_TABLE(hbr);
-GEN_LDX(hbr, 0x16, 0x18);
+GEN_LDX(hbr, 0x16, 0x18, PPC_INTEGER);
/* lwbrx */
OP_LD_TABLE(wbr);
-GEN_LDX(wbr, 0x16, 0x10);
+GEN_LDX(wbr, 0x16, 0x10, PPC_INTEGER);
/* sthbrx */
OP_ST_TABLE(hbr);
-GEN_STX(hbr, 0x16, 0x1C);
+GEN_STX(hbr, 0x16, 0x1C, PPC_INTEGER);
/* stwbrx */
OP_ST_TABLE(wbr);
-GEN_STX(wbr, 0x16, 0x14);
+GEN_STX(wbr, 0x16, 0x14, PPC_INTEGER);
/*** Integer load and store multiple ***/
#define op_ldstm(name, reg) (*gen_op_##name[ctx->mem_idx])(reg)
+#if defined(TARGET_PPC64)
+#if defined(CONFIG_USER_ONLY)
+static GenOpFunc1 *gen_op_lmw[] = {
+ &gen_op_lmw_raw,
+ &gen_op_lmw_le_raw,
+ &gen_op_lmw_64_raw,
+ &gen_op_lmw_le_64_raw,
+};
+static GenOpFunc1 *gen_op_stmw[] = {
+ /* Bug fix: mem_idx = le | (sf << 1) in user-only PPC64 builds, so
+  * this table needs the two 32-bit entries first, mirroring the
+  * gen_op_lmw table above; without them 32-bit mode indexed the
+  * 64-bit handlers (and the array had too few entries).
+  */
+ &gen_op_stmw_raw,
+ &gen_op_stmw_le_raw,
+ &gen_op_stmw_64_raw,
+ &gen_op_stmw_le_64_raw,
+};
+#else
+static GenOpFunc1 *gen_op_lmw[] = {
+ &gen_op_lmw_user,
+ &gen_op_lmw_le_user,
+ &gen_op_lmw_kernel,
+ &gen_op_lmw_le_kernel,
+ &gen_op_lmw_64_user,
+ &gen_op_lmw_le_64_user,
+ &gen_op_lmw_64_kernel,
+ &gen_op_lmw_le_64_kernel,
+};
+static GenOpFunc1 *gen_op_stmw[] = {
+ &gen_op_stmw_user,
+ &gen_op_stmw_le_user,
+ &gen_op_stmw_kernel,
+ &gen_op_stmw_le_kernel,
+ &gen_op_stmw_64_user,
+ &gen_op_stmw_le_64_user,
+ &gen_op_stmw_64_kernel,
+ &gen_op_stmw_le_64_kernel,
+};
+#endif
+#else
#if defined(CONFIG_USER_ONLY)
static GenOpFunc1 *gen_op_lmw[] = {
&gen_op_lmw_raw,
&gen_op_stmw_le_kernel,
};
#endif
+#endif
/* lmw */
GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
{
/* NIP cannot be restored if the memory exception comes from an helper */
- gen_op_update_nip(ctx->nip - 4);
+ gen_update_nip(ctx, ctx->nip - 4);
gen_addr_imm_index(ctx);
op_ldstm(lmw, rD(ctx->opcode));
}
GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
{
/* NIP cannot be restored if the memory exception comes from an helper */
- gen_op_update_nip(ctx->nip - 4);
+ gen_update_nip(ctx, ctx->nip - 4);
gen_addr_imm_index(ctx);
op_ldstm(stmw, rS(ctx->opcode));
}
/*** Integer load and store strings ***/
#define op_ldsts(name, start) (*gen_op_##name[ctx->mem_idx])(start)
#define op_ldstsx(name, rd, ra, rb) (*gen_op_##name[ctx->mem_idx])(rd, ra, rb)
+#if defined(TARGET_PPC64)
+/* Load/store-string dispatch tables indexed by ctx->mem_idx:
+ * user-only builds: le | (sf << 1);
+ * system builds: le | (supervisor << 1) | (sf << 2)
+ * (see the mem_idx setup in the translation loop).
+ */
+#if defined(CONFIG_USER_ONLY)
+static GenOpFunc1 *gen_op_lswi[] = {
+ &gen_op_lswi_raw,
+ &gen_op_lswi_le_raw,
+ &gen_op_lswi_64_raw,
+ &gen_op_lswi_le_64_raw,
+};
+static GenOpFunc3 *gen_op_lswx[] = {
+ &gen_op_lswx_raw,
+ &gen_op_lswx_le_raw,
+ &gen_op_lswx_64_raw,
+ &gen_op_lswx_le_64_raw,
+};
+static GenOpFunc1 *gen_op_stsw[] = {
+ &gen_op_stsw_raw,
+ &gen_op_stsw_le_raw,
+ &gen_op_stsw_64_raw,
+ &gen_op_stsw_le_64_raw,
+};
+#else
+static GenOpFunc1 *gen_op_lswi[] = {
+ &gen_op_lswi_user,
+ &gen_op_lswi_le_user,
+ &gen_op_lswi_kernel,
+ &gen_op_lswi_le_kernel,
+ &gen_op_lswi_64_user,
+ &gen_op_lswi_le_64_user,
+ &gen_op_lswi_64_kernel,
+ &gen_op_lswi_le_64_kernel,
+};
+static GenOpFunc3 *gen_op_lswx[] = {
+ &gen_op_lswx_user,
+ &gen_op_lswx_le_user,
+ &gen_op_lswx_kernel,
+ &gen_op_lswx_le_kernel,
+ &gen_op_lswx_64_user,
+ &gen_op_lswx_le_64_user,
+ &gen_op_lswx_64_kernel,
+ &gen_op_lswx_le_64_kernel,
+};
+static GenOpFunc1 *gen_op_stsw[] = {
+ &gen_op_stsw_user,
+ &gen_op_stsw_le_user,
+ &gen_op_stsw_kernel,
+ &gen_op_stsw_le_kernel,
+ &gen_op_stsw_64_user,
+ &gen_op_stsw_le_64_user,
+ &gen_op_stsw_64_kernel,
+ &gen_op_stsw_le_64_kernel,
+};
+#endif
+#else
#if defined(CONFIG_USER_ONLY)
static GenOpFunc1 *gen_op_lswi[] = {
&gen_op_lswi_raw,
&gen_op_stsw_le_kernel,
};
#endif
+#endif
/* lswi */
/* PowerPC32 specification says we must generate an exception if
return;
}
/* NIP cannot be restored if the memory exception comes from an helper */
- gen_op_update_nip(ctx->nip - 4);
+ gen_update_nip(ctx, ctx->nip - 4);
gen_addr_register(ctx);
gen_op_set_T1(nb);
op_ldsts(lswi, start);
int rb = rB(ctx->opcode);
/* NIP cannot be restored if the memory exception comes from an helper */
- gen_op_update_nip(ctx->nip - 4);
+ gen_update_nip(ctx, ctx->nip - 4);
gen_addr_reg_index(ctx);
if (ra == 0) {
ra = rb;
int nb = NB(ctx->opcode);
/* NIP cannot be restored if the memory exception comes from an helper */
- gen_op_update_nip(ctx->nip - 4);
+ gen_update_nip(ctx, ctx->nip - 4);
gen_addr_register(ctx);
if (nb == 0)
nb = 32;
GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_INTEGER)
{
/* NIP cannot be restored if the memory exception comes from an helper */
- gen_op_update_nip(ctx->nip - 4);
+ gen_update_nip(ctx, ctx->nip - 4);
gen_addr_reg_index(ctx);
gen_op_load_xer_bc();
op_ldsts(stsw, rS(ctx->opcode));
#define op_lwarx() (*gen_op_lwarx[ctx->mem_idx])()
#define op_stwcx() (*gen_op_stwcx[ctx->mem_idx])()
+#if defined(TARGET_PPC64)
#if defined(CONFIG_USER_ONLY)
static GenOpFunc *gen_op_lwarx[] = {
&gen_op_lwarx_raw,
&gen_op_lwarx_le_raw,
+ &gen_op_lwarx_64_raw,
+ &gen_op_lwarx_le_64_raw,
};
static GenOpFunc *gen_op_stwcx[] = {
&gen_op_stwcx_raw,
&gen_op_stwcx_le_raw,
+ &gen_op_stwcx_64_raw,
+ &gen_op_stwcx_le_64_raw,
};
#else
static GenOpFunc *gen_op_lwarx[] = {
&gen_op_lwarx_le_user,
&gen_op_lwarx_kernel,
&gen_op_lwarx_le_kernel,
+ &gen_op_lwarx_64_user,
+ &gen_op_lwarx_le_64_user,
+ &gen_op_lwarx_64_kernel,
+ &gen_op_lwarx_le_64_kernel,
};
static GenOpFunc *gen_op_stwcx[] = {
&gen_op_stwcx_user,
&gen_op_stwcx_le_user,
&gen_op_stwcx_kernel,
&gen_op_stwcx_le_kernel,
+ &gen_op_stwcx_64_user,
+ &gen_op_stwcx_le_64_user,
+ &gen_op_stwcx_64_kernel,
+ &gen_op_stwcx_le_64_kernel,
};
#endif
+#else
+#if defined(CONFIG_USER_ONLY)
+static GenOpFunc *gen_op_lwarx[] = {
+ &gen_op_lwarx_raw,
+ &gen_op_lwarx_le_raw,
+};
+static GenOpFunc *gen_op_stwcx[] = {
+ &gen_op_stwcx_raw,
+ &gen_op_stwcx_le_raw,
+};
+#else
+static GenOpFunc *gen_op_lwarx[] = {
+ &gen_op_lwarx_user,
+ &gen_op_lwarx_le_user,
+ &gen_op_lwarx_kernel,
+ &gen_op_lwarx_le_kernel,
+};
+static GenOpFunc *gen_op_stwcx[] = {
+ &gen_op_stwcx_user,
+ &gen_op_stwcx_le_user,
+ &gen_op_stwcx_kernel,
+ &gen_op_stwcx_le_kernel,
+};
+#endif
+#endif
/* lwarx */
GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000001, PPC_RES)
gen_op_goto_tb0(TBPARAM(tb));
else
gen_op_goto_tb1(TBPARAM(tb));
- gen_op_set_T1(dest);
- gen_op_b_T1();
+ gen_set_T1(dest);
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_b_T1_64();
+ else
+#endif
+ gen_op_b_T1();
gen_op_set_T0((long)tb + n);
if (ctx->singlestep_enabled)
gen_op_debug();
gen_op_exit_tb();
} else {
- gen_op_set_T1(dest);
- gen_op_b_T1();
+ gen_set_T1(dest);
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_b_T1_64();
+ else
+#endif
+ gen_op_b_T1();
gen_op_reset_T0();
if (ctx->singlestep_enabled)
gen_op_debug();
/* sign extend LI */
#if defined(TARGET_PPC64)
- li = ((target_long)LI(ctx->opcode) << 38) >> 38;
-#else
- li = ((target_long)LI(ctx->opcode) << 6) >> 6;
+ if (ctx->sf_mode)
+ li = ((int64_t)LI(ctx->opcode) << 38) >> 38;
+ else
#endif
+ li = ((int32_t)LI(ctx->opcode) << 6) >> 6;
if (likely(AA(ctx->opcode) == 0))
target = ctx->nip + li - 4;
else
target = li;
if (LK(ctx->opcode)) {
- gen_op_setlr(ctx->nip);
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_setlr_64(ctx->nip >> 32, ctx->nip);
+ else
+#endif
+ gen_op_setlr(ctx->nip);
}
gen_goto_tb(ctx, 0, target);
ctx->exception = EXCP_BRANCH;
#define BCOND_LR 1
#define BCOND_CTR 2
-static inline void gen_bcond(DisasContext *ctx, int type)
-{
+static inline void gen_bcond(DisasContext *ctx, int type)
+{
target_ulong target = 0;
target_ulong li;
- uint32_t bo = BO(ctx->opcode);
- uint32_t bi = BI(ctx->opcode);
- uint32_t mask;
+ uint32_t bo = BO(ctx->opcode);
+ uint32_t bi = BI(ctx->opcode);
+ uint32_t mask;
if ((bo & 0x4) == 0)
- gen_op_dec_ctr();
+ gen_op_dec_ctr();
switch(type) {
case BCOND_IM:
li = (target_long)((int16_t)(BD(ctx->opcode)));
gen_op_movl_T1_lr();
break;
}
- if (LK(ctx->opcode)) {
- gen_op_setlr(ctx->nip);
+ if (LK(ctx->opcode)) {
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_setlr_64(ctx->nip >> 32, ctx->nip);
+ else
+#endif
+ gen_op_setlr(ctx->nip);
}
if (bo & 0x10) {
- /* No CR condition */
- switch (bo & 0x6) {
- case 0:
- gen_op_test_ctr();
+ /* No CR condition */
+ switch (bo & 0x6) {
+ case 0:
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_test_ctr_64();
+ else
+#endif
+ gen_op_test_ctr();
+ break;
+ case 2:
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_test_ctrz_64();
+ else
+#endif
+ gen_op_test_ctrz();
break;
- case 2:
- gen_op_test_ctrz();
- break;
default:
- case 4:
- case 6:
+ case 4:
+ case 6:
if (type == BCOND_IM) {
gen_goto_tb(ctx, 0, target);
} else {
- gen_op_b_T1();
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_b_T1_64();
+ else
+#endif
+ gen_op_b_T1();
gen_op_reset_T0();
}
goto no_test;
}
- } else {
- mask = 1 << (3 - (bi & 0x03));
- gen_op_load_crf_T0(bi >> 2);
- if (bo & 0x8) {
- switch (bo & 0x6) {
- case 0:
- gen_op_test_ctr_true(mask);
- break;
- case 2:
- gen_op_test_ctrz_true(mask);
- break;
- default:
- case 4:
- case 6:
+ } else {
+ mask = 1 << (3 - (bi & 0x03));
+ gen_op_load_crf_T0(bi >> 2);
+ if (bo & 0x8) {
+ switch (bo & 0x6) {
+ case 0:
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_test_ctr_true_64(mask);
+ else
+#endif
+ gen_op_test_ctr_true(mask);
+ break;
+ case 2:
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_test_ctrz_true_64(mask);
+ else
+#endif
+ gen_op_test_ctrz_true(mask);
+ break;
+ default:
+ case 4:
+ case 6:
gen_op_test_true(mask);
- break;
- }
- } else {
- switch (bo & 0x6) {
- case 0:
- gen_op_test_ctr_false(mask);
- break;
- case 2:
- gen_op_test_ctrz_false(mask);
- break;
+ break;
+ }
+ } else {
+ switch (bo & 0x6) {
+ case 0:
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_test_ctr_false_64(mask);
+ else
+#endif
+ gen_op_test_ctr_false(mask);
+ break;
+ case 2:
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_test_ctrz_false_64(mask);
+ else
+#endif
+ gen_op_test_ctrz_false(mask);
+ break;
default:
- case 4:
- case 6:
+ case 4:
+ case 6:
gen_op_test_false(mask);
- break;
- }
- }
- }
+ break;
+ }
+ }
+ }
if (type == BCOND_IM) {
int l1 = gen_new_label();
gen_op_jz_T0(l1);
gen_set_label(l1);
gen_goto_tb(ctx, 1, ctx->nip);
} else {
- gen_op_btest_T1(ctx->nip);
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_btest_T1_64(ctx->nip >> 32, ctx->nip);
+ else
+#endif
+ gen_op_btest_T1(ctx->nip);
gen_op_reset_T0();
}
no_test:
if (ctx->singlestep_enabled)
gen_op_debug();
gen_op_exit_tb();
- ctx->exception = EXCP_BRANCH;
+ ctx->exception = EXCP_BRANCH;
}
GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW)
-{
+{
+ /* Conditional branch, immediate displacement */
 gen_bcond(ctx, BCOND_IM);
}
GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW)
-{
+{
+ /* Conditional branch to CTR */
 gen_bcond(ctx, BCOND_CTR);
}
GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW)
-{
+{
+ /* Conditional branch to LR */
 gen_bcond(ctx, BCOND_LR);
}
RET_PRIVOPC(ctx);
return;
}
- gen_op_rfi();
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode)
+ gen_op_rfi_32();
+ else
+#endif
+ gen_op_rfi();
RET_CHG_FLOW(ctx);
#endif
}
gen_op_load_gpr_T0(rA(ctx->opcode));
gen_op_load_gpr_T1(rB(ctx->opcode));
/* Update the nip since this might generate a trap exception */
- gen_op_update_nip(ctx->nip);
+ gen_update_nip(ctx, ctx->nip);
gen_op_tw(TO(ctx->opcode));
}
GEN_HANDLER(twi, 0x03, 0xFF, 0xFF, 0x00000000, PPC_FLOW)
{
+ /* Trap word immediate: compares rA against SIMM per the TO field */
 gen_op_load_gpr_T0(rA(ctx->opcode));
- gen_op_set_T1(SIMM(ctx->opcode));
+ gen_set_T1(SIMM(ctx->opcode));
+ /* Update the nip since this might generate a trap exception */
+ gen_update_nip(ctx, ctx->nip);
 gen_op_tw(TO(ctx->opcode));
}
+#if defined(TARGET_PPC64)
+/* td: 64-bit trap, compares rA against rB per the TO field */
+GEN_HANDLER(td, 0x1F, 0x04, 0x02, 0x00000001, PPC_64B)
+{
+ gen_op_load_gpr_T0(rA(ctx->opcode));
+ gen_op_load_gpr_T1(rB(ctx->opcode));
+ /* Update the nip since this might generate a trap exception */
+ gen_update_nip(ctx, ctx->nip);
+ gen_op_td(TO(ctx->opcode));
+}
+
+/* tdi: 64-bit trap, compares rA against the sign-extended immediate */
+GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B)
+{
+ gen_op_load_gpr_T0(rA(ctx->opcode));
+ gen_set_T1(SIMM(ctx->opcode));
+ /* Update the nip since this might generate a trap exception */
+ gen_update_nip(ctx, ctx->nip);
+ gen_op_td(TO(ctx->opcode));
+}
+#endif
+
/*** Processor control ***/
/* mcrxr */
GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC)
/* mfcr */
GEN_HANDLER(mfcr, 0x1F, 0x13, 0x00, 0x00000801, PPC_MISC)
{
-#if 0 // XXX: to be tested
uint32_t crm, crn;
if (likely(ctx->opcode & 0x00100000)) {
crn = ffs(crm);
gen_op_load_cro(7 - crn);
}
- } else
-#endif
- {
- gen_op_load_cr();
- }
+ } else {
+ gen_op_load_cr();
+ }
gen_op_store_T0_gpr(rD(ctx->opcode));
}
RET_PRIVREG(ctx);
return;
}
- gen_op_update_nip((ctx)->nip);
+ gen_update_nip(ctx, ctx->nip);
gen_op_load_gpr_T0(rS(ctx->opcode));
- gen_op_store_msr();
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode)
+ gen_op_store_msr_32();
+ else
+#endif
+ gen_op_store_msr();
/* Must stop the translation as machine state (may have) changed */
RET_CHG_FLOW(ctx);
#endif
/* dcbz */
#define op_dcbz() (*gen_op_dcbz[ctx->mem_idx])()
+#if defined(TARGET_PPC64)
+/* The little-endian slots reuse the big-endian entries: a cache-block
+ * zero has no byte order, only the 32/64-bit and user/kernel variants
+ * differ.
+ */
+#if defined(CONFIG_USER_ONLY)
+static GenOpFunc *gen_op_dcbz[] = {
+ &gen_op_dcbz_raw,
+ &gen_op_dcbz_raw,
+ &gen_op_dcbz_64_raw,
+ &gen_op_dcbz_64_raw,
+};
+#else
+static GenOpFunc *gen_op_dcbz[] = {
+ &gen_op_dcbz_user,
+ &gen_op_dcbz_user,
+ &gen_op_dcbz_kernel,
+ &gen_op_dcbz_kernel,
+ &gen_op_dcbz_64_user,
+ &gen_op_dcbz_64_user,
+ &gen_op_dcbz_64_kernel,
+ &gen_op_dcbz_64_kernel,
+};
+#endif
+#else
#if defined(CONFIG_USER_ONLY)
static GenOpFunc *gen_op_dcbz[] = {
&gen_op_dcbz_raw,
&gen_op_dcbz_kernel,
};
#endif
+#endif
GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03E00001, PPC_CACHE)
{
GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE)
{
 /* NIP cannot be restored if the memory exception comes from an helper */
- gen_op_update_nip(ctx->nip - 4);
+ gen_update_nip(ctx, ctx->nip - 4);
 gen_addr_reg_index(ctx);
-  gen_op_icbi();
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_icbi_64();
+ else
+#endif
+ gen_op_icbi();
+ /* NOTE(review): translation stops here (RET_STOP), presumably because
+  * icbi can invalidate already-translated code — confirm */
 RET_STOP(ctx);
}
return;
}
gen_op_load_gpr_T0(rB(ctx->opcode));
- gen_op_tlbie();
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_tlbie_64();
+ else
+#endif
+ gen_op_tlbie();
RET_STOP(ctx);
#endif
}
/* Optional: */
#define op_eciwx() (*gen_op_eciwx[ctx->mem_idx])()
#define op_ecowx() (*gen_op_ecowx[ctx->mem_idx])()
+#if defined(TARGET_PPC64)
#if defined(CONFIG_USER_ONLY)
static GenOpFunc *gen_op_eciwx[] = {
&gen_op_eciwx_raw,
&gen_op_eciwx_le_raw,
+ &gen_op_eciwx_64_raw,
+ &gen_op_eciwx_le_64_raw,
};
static GenOpFunc *gen_op_ecowx[] = {
&gen_op_ecowx_raw,
&gen_op_ecowx_le_raw,
+ &gen_op_ecowx_64_raw,
+ &gen_op_ecowx_le_64_raw,
};
#else
static GenOpFunc *gen_op_eciwx[] = {
&gen_op_eciwx_le_user,
&gen_op_eciwx_kernel,
&gen_op_eciwx_le_kernel,
+ &gen_op_eciwx_64_user,
+ &gen_op_eciwx_le_64_user,
+ &gen_op_eciwx_64_kernel,
+ &gen_op_eciwx_le_64_kernel,
};
static GenOpFunc *gen_op_ecowx[] = {
&gen_op_ecowx_user,
&gen_op_ecowx_le_user,
&gen_op_ecowx_kernel,
&gen_op_ecowx_le_kernel,
+ &gen_op_ecowx_64_user,
+ &gen_op_ecowx_le_64_user,
+ &gen_op_ecowx_64_kernel,
+ &gen_op_ecowx_le_64_kernel,
};
#endif
+#else
+#if defined(CONFIG_USER_ONLY)
+static GenOpFunc *gen_op_eciwx[] = {
+ &gen_op_eciwx_raw,
+ &gen_op_eciwx_le_raw,
+};
+static GenOpFunc *gen_op_ecowx[] = {
+ &gen_op_ecowx_raw,
+ &gen_op_ecowx_le_raw,
+};
+#else
+static GenOpFunc *gen_op_eciwx[] = {
+ &gen_op_eciwx_user,
+ &gen_op_eciwx_le_user,
+ &gen_op_eciwx_kernel,
+ &gen_op_eciwx_le_kernel,
+};
+static GenOpFunc *gen_op_ecowx[] = {
+ &gen_op_ecowx_user,
+ &gen_op_ecowx_le_user,
+ &gen_op_ecowx_kernel,
+ &gen_op_ecowx_le_kernel,
+};
+#endif
+#endif
/* eciwx */
GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN)
ra = rb;
}
/* NIP cannot be restored if the memory exception comes from an helper */
- gen_op_update_nip(ctx->nip - 4);
+ gen_update_nip(ctx, ctx->nip - 4);
gen_op_load_xer_bc();
gen_op_load_xer_cmp();
op_POWER_lscbx(rD(ctx->opcode), ra, rb);
gen_set_Rc0(ctx);
}
-/* sraiq -sraiq. */
+/* sraiq - sraiq. */
GEN_HANDLER(sraiq, 0x1F, 0x18, 0x1D, 0x00000000, PPC_POWER_BR)
{
gen_op_load_gpr_T0(rS(ctx->opcode));
GEN_HANDLER(lfq, 0x38, 0xFF, 0xFF, 0x00000003, PPC_POWER2)
{
/* NIP cannot be restored if the memory exception comes from an helper */
- gen_op_update_nip(ctx->nip - 4);
+ gen_update_nip(ctx, ctx->nip - 4);
gen_addr_imm_index(ctx);
op_POWER2_lfq();
gen_op_store_FT0_fpr(rD(ctx->opcode));
int ra = rA(ctx->opcode);
/* NIP cannot be restored if the memory exception comes from an helper */
- gen_op_update_nip(ctx->nip - 4);
+ gen_update_nip(ctx, ctx->nip - 4);
gen_addr_imm_index(ctx);
op_POWER2_lfq();
gen_op_store_FT0_fpr(rD(ctx->opcode));
int ra = rA(ctx->opcode);
/* NIP cannot be restored if the memory exception comes from an helper */
- gen_op_update_nip(ctx->nip - 4);
+ gen_update_nip(ctx, ctx->nip - 4);
gen_addr_reg_index(ctx);
op_POWER2_lfq();
gen_op_store_FT0_fpr(rD(ctx->opcode));
GEN_HANDLER(lfqx, 0x1F, 0x17, 0x18, 0x00000001, PPC_POWER2)
{
/* NIP cannot be restored if the memory exception comes from an helper */
- gen_op_update_nip(ctx->nip - 4);
+ gen_update_nip(ctx, ctx->nip - 4);
gen_addr_reg_index(ctx);
op_POWER2_lfq();
gen_op_store_FT0_fpr(rD(ctx->opcode));
GEN_HANDLER(stfq, 0x3C, 0xFF, 0xFF, 0x00000003, PPC_POWER2)
{
/* NIP cannot be restored if the memory exception comes from an helper */
- gen_op_update_nip(ctx->nip - 4);
+ gen_update_nip(ctx, ctx->nip - 4);
gen_addr_imm_index(ctx);
gen_op_load_fpr_FT0(rS(ctx->opcode));
gen_op_load_fpr_FT1(rS(ctx->opcode) + 1);
int ra = rA(ctx->opcode);
/* NIP cannot be restored if the memory exception comes from an helper */
- gen_op_update_nip(ctx->nip - 4);
+ gen_update_nip(ctx, ctx->nip - 4);
gen_addr_imm_index(ctx);
gen_op_load_fpr_FT0(rS(ctx->opcode));
gen_op_load_fpr_FT1(rS(ctx->opcode) + 1);
int ra = rA(ctx->opcode);
/* NIP cannot be restored if the memory exception comes from an helper */
- gen_op_update_nip(ctx->nip - 4);
+ gen_update_nip(ctx, ctx->nip - 4);
gen_addr_reg_index(ctx);
gen_op_load_fpr_FT0(rS(ctx->opcode));
gen_op_load_fpr_FT1(rS(ctx->opcode) + 1);
GEN_HANDLER(stfqx, 0x1F, 0x17, 0x1C, 0x00000001, PPC_POWER2)
{
/* NIP cannot be restored if the memory exception comes from an helper */
- gen_op_update_nip(ctx->nip - 4);
+ gen_update_nip(ctx, ctx->nip - 4);
gen_addr_reg_index(ctx);
gen_op_load_fpr_FT0(rS(ctx->opcode));
gen_op_load_fpr_FT1(rS(ctx->opcode) + 1);
}
gen_addr_reg_index(ctx);
/* Use the same micro-ops as for tlbie */
- gen_op_tlbie();
+#if defined(TARGET_PPC64)
+ if (ctx->sf_mode)
+ gen_op_tlbie_64();
+ else
+#endif
+ gen_op_tlbie();
RET_STOP(ctx);
#endif
}
#endif
}
+/* TLB management - PowerPC 405 implementation */
/* tlbre */
GEN_HANDLER(tlbre, 0x1F, 0x12, 0x1D, 0x00000001, PPC_EMB_COMMON)
{
#endif
}
-/* tlbsx - tlbsx. */ /* Named tlbs in BookE */
+/* tlbsx - tlbsx. */
GEN_HANDLER(tlbsx, 0x1F, 0x12, 0x1C, 0x00000000, PPC_EMB_COMMON)
{
#if defined(CONFIG_USER_ONLY)
}
/* tlbwe */
-GEN_HANDLER(tlbwe, 0x1F, 0x12, 0x1E, 0x00000001, PPC_EMB_COMMON)
+GEN_HANDLER(tlbwe, 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_SPEC)
{
#if defined(CONFIG_USER_ONLY)
RET_PRIVOPC(ctx);
cpu_fprintf(f, "NIP " REGX " LR " REGX " CTR " REGX "\n",
env->nip, env->lr, env->ctr);
- cpu_fprintf(f, "MSR " REGX FILL " XER %08x TB %08x %08x "
+ cpu_fprintf(f, "MSR " REGX FILL " XER %08x "
+#if !defined(NO_TIMER_DUMP)
+ "TB %08x %08x "
#if !defined(CONFIG_USER_ONLY)
"DECR %08x"
+#endif
#endif
"\n",
- do_load_msr(env), load_xer(env), cpu_ppc_load_tbu(env),
- cpu_ppc_load_tbl(env)
+ do_load_msr(env), load_xer(env)
+#if !defined(NO_TIMER_DUMP)
+ , cpu_ppc_load_tbu(env), cpu_ppc_load_tbl(env)
#if !defined(CONFIG_USER_ONLY)
, cpu_ppc_load_decr(env)
+#endif
#endif
);
for (i = 0; i < 32; i++) {
"SDR1 " REGX "\n",
env->spr[SPR_SRR0], env->spr[SPR_SRR1], env->sdr1);
-#undef REGX
#undef RGPL
#undef RFPL
#undef FILL
ctx.spr_cb = env->spr_cb;
#if defined(CONFIG_USER_ONLY)
ctx.mem_idx = msr_le;
+#if defined(TARGET_PPC64)
+ ctx.mem_idx |= msr_sf << 1;
+#endif
#else
ctx.supervisor = 1 - msr_pr;
ctx.mem_idx = ((1 - msr_pr) << 1) | msr_le;
+#if defined(TARGET_PPC64)
+ ctx.mem_idx |= msr_sf << 2;
+#endif
+#endif
+#if defined(TARGET_PPC64)
+ ctx.sf_mode = msr_sf;
#endif
ctx.fpu_enabled = msr_fp;
ctx.singlestep_enabled = env->singlestep_enabled;
if (unlikely(env->nb_breakpoints > 0)) {
for (j = 0; j < env->nb_breakpoints; j++) {
if (env->breakpoints[j] == ctx.nip) {
- gen_op_update_nip(ctx.nip);
+ gen_update_nip(&ctx, ctx.nip);
gen_op_debug();
break;
}
if (unlikely(handler->handler == &gen_invalid)) {
if (loglevel > 0) {
fprintf(logfile, "invalid/unsupported opcode: "
- "%02x - %02x - %02x (%08x) 0x%08x %d\n",
+ "%02x - %02x - %02x (%08x) 0x" REGX " %d\n",
opc1(ctx.opcode), opc2(ctx.opcode),
opc3(ctx.opcode), ctx.opcode, ctx.nip - 4, msr_ir);
} else {
printf("invalid/unsupported opcode: "
- "%02x - %02x - %02x (%08x) 0x%08x %d\n",
+ "%02x - %02x - %02x (%08x) 0x" REGX " %d\n",
opc1(ctx.opcode), opc2(ctx.opcode),
opc3(ctx.opcode), ctx.opcode, ctx.nip - 4, msr_ir);
}
if (unlikely((ctx.opcode & handler->inval) != 0)) {
if (loglevel > 0) {
fprintf(logfile, "invalid bits: %08x for opcode: "
- "%02x -%02x - %02x (0x%08x) (0x%08x)\n",
+ "%02x -%02x - %02x (%08x) " REGX "\n",
ctx.opcode & handler->inval, opc1(ctx.opcode),
opc2(ctx.opcode), opc3(ctx.opcode),
ctx.opcode, ctx.nip - 4);
} else {
printf("invalid bits: %08x for opcode: "
- "%02x -%02x - %02x (0x%08x) (0x%08x)\n",
+ "%02x -%02x - %02x (%08x) " REGX "\n",
ctx.opcode & handler->inval, opc1(ctx.opcode),
opc2(ctx.opcode), opc3(ctx.opcode),
ctx.opcode, ctx.nip - 4);
} else {
tb->size = ctx.nip - pc_start;
}
-#ifdef DEBUG_DISAS
+#if defined(DEBUG_DISAS)
if (loglevel & CPU_LOG_TB_CPU) {
fprintf(logfile, "---------------- excp: %04x\n", ctx.exception);
cpu_dump_state(env, logfile, fprintf, 0);