int fpu_enabled;
int address_mask_32bit;
struct TranslationBlock *tb;
- uint32_t features;
+ sparc_def_t *def;
} DisasContext;
/* This function uses non-native bit order */
static inline void gen_cc_C_add_icc(TCGv dst, TCGv src1)
{
- TCGv r_temp;
+ TCGv r_temp1, r_temp2;
int l1;
l1 = gen_new_label();
- r_temp = tcg_temp_new(TCG_TYPE_TL);
- tcg_gen_andi_tl(r_temp, dst, 0xffffffffULL);
- tcg_gen_brcond_tl(TCG_COND_GEU, dst, src1, l1);
+ r_temp1 = tcg_temp_new(TCG_TYPE_TL);
+ r_temp2 = tcg_temp_new(TCG_TYPE_TL);
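+ /* icc carry is set when the 32-bit unsigned add overflows: on a
+    64-bit target_long both operands must be masked to their low 32
+    bits before the unsigned comparison */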
+ tcg_gen_andi_tl(r_temp1, dst, 0xffffffffULL);
+ tcg_gen_andi_tl(r_temp2, src1, 0xffffffffULL);
+ tcg_gen_brcond_tl(TCG_COND_GEU, r_temp1, r_temp2, l1);
tcg_gen_ori_i32(cpu_psr, cpu_psr, PSR_CARRY);
gen_set_label(l1);
- tcg_temp_free(r_temp);
+ tcg_temp_free(r_temp1);
+ tcg_temp_free(r_temp2);
}
#ifdef TARGET_SPARC64
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
- TCGv r_temp, r_temp2;
+ TCGv r_temp;
int l1;
l1 = gen_new_label();
r_temp = tcg_temp_new(TCG_TYPE_TL);
- r_temp2 = tcg_temp_new(TCG_TYPE_I32);
/* old op:
if (!(env->y & 1))
T1 = 0;
*/
tcg_gen_mov_tl(cpu_cc_src, src1);
- tcg_gen_ld32u_tl(r_temp, cpu_env, offsetof(CPUSPARCState, y));
- tcg_gen_trunc_tl_i32(r_temp2, r_temp);
- tcg_gen_andi_i32(r_temp2, r_temp2, 0x1);
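+ /* env->y is assumed to be a target_ulong here, so its low bit can
+    be loaded and tested with tl ops instead of truncating to i32 */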
+ tcg_gen_ld_tl(r_temp, cpu_env, offsetof(CPUSPARCState, y));
+ tcg_gen_andi_tl(r_temp, r_temp, 0x1);
tcg_gen_mov_tl(cpu_cc_src2, src2);
- tcg_gen_brcondi_i32(TCG_COND_NE, r_temp2, 0, l1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, r_temp, 0, l1);
tcg_gen_movi_tl(cpu_cc_src2, 0);
gen_set_label(l1);
// b2 = T0 & 1;
// env->y = (b2 << 31) | (env->y >> 1);
- tcg_gen_trunc_tl_i32(r_temp2, cpu_cc_src);
- tcg_gen_andi_i32(r_temp2, r_temp2, 0x1);
- tcg_gen_shli_i32(r_temp2, r_temp2, 31);
- tcg_gen_ld_i32(cpu_tmp32, cpu_env, offsetof(CPUSPARCState, y));
- tcg_gen_shri_i32(cpu_tmp32, cpu_tmp32, 1);
- tcg_gen_or_i32(cpu_tmp32, cpu_tmp32, r_temp2);
- tcg_temp_free(r_temp2);
- tcg_gen_st_i32(cpu_tmp32, cpu_env, offsetof(CPUSPARCState, y));
+ tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
+ tcg_gen_shli_tl(r_temp, r_temp, 31);
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUSPARCState, y));
+ tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 1);
+ tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, r_temp);
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUSPARCState, y));
// b1 = N ^ V;
gen_mov_reg_N(cpu_tmp0, cpu_psr);
tcg_gen_mul_i64(r_temp2, r_temp, r_temp2);
tcg_gen_shri_i64(r_temp, r_temp2, 32);
- tcg_gen_trunc_i64_i32(r_temp, r_temp);
- tcg_gen_st_i32(r_temp, cpu_env, offsetof(CPUSPARCState, y));
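+ /* Y receives the high 32 bits of the 64-bit product */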
+ tcg_gen_trunc_i64_tl(cpu_tmp0, r_temp);
tcg_temp_free(r_temp);
+ tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUSPARCState, y));
#ifdef TARGET_SPARC64
tcg_gen_mov_i64(dst, r_temp2);
#else
tcg_gen_mul_i64(r_temp2, r_temp, r_temp2);
tcg_gen_shri_i64(r_temp, r_temp2, 32);
- tcg_gen_trunc_i64_i32(r_temp, r_temp);
- tcg_gen_st_i32(r_temp, cpu_env, offsetof(CPUSPARCState, y));
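+ /* same Y update as above, presumably for the signed multiply */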
+ tcg_gen_trunc_i64_tl(cpu_tmp0, r_temp);
tcg_temp_free(r_temp);
+ tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUSPARCState, y));
#ifdef TARGET_SPARC64
tcg_gen_mov_i64(dst, r_temp2);
#else
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
unsigned int fcc_offset)
{
- tcg_gen_extu_i32_tl(reg, src);
- tcg_gen_shri_tl(reg, reg, FSR_FCC0_SHIFT + fcc_offset);
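+ /* src (presumably the FSR) is already target_long sized, so it can
+    be shifted directly without the i32 extension */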
+ tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
tcg_gen_andi_tl(reg, reg, 0x1);
}
static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
unsigned int fcc_offset)
{
- tcg_gen_extu_i32_tl(reg, src);
- tcg_gen_shri_tl(reg, reg, FSR_FCC1_SHIFT + fcc_offset);
+ tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
tcg_gen_andi_tl(reg, reg, 0x1);
}
#define CHECK_IU_FEATURE(dc, FEATURE) \
- if (!((dc)->features & CPU_FEATURE_ ## FEATURE)) \
+ if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE) \
- if (!((dc)->features & CPU_FEATURE_ ## FEATURE)) \
+ if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
goto nfpu_insn;
/* before an instruction, dc->pc must be static */
if (insn & (1 << 12)) {
tcg_gen_shli_i64(cpu_dst, cpu_src1, rs2 & 0x3f);
} else {
- tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
- tcg_gen_shli_i64(cpu_dst, cpu_dst, rs2 & 0x1f);
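+ /* V9 sll shifts all 64 bits of the source and only the 5-bit shift
+    count distinguishes it from sllx, so masking the source first is
+    unnecessary */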
+ tcg_gen_shli_i64(cpu_dst, cpu_src1, rs2 & 0x1f);
}
} else { /* register */
rs2 = GET_FIELD(insn, 27, 31);
gen_movl_reg_TN(rs2, cpu_src2);
if (insn & (1 << 12)) {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
- tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
} else {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
- tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
- tcg_gen_shl_i64(cpu_dst, cpu_dst, cpu_tmp0);
}
+ tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
}
gen_movl_TN_reg(rd, cpu_dst);
} else if (xop == 0x26) { /* srl, V9 srlx */
} else {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
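+ /* sra operates on the low 32 bits of the source sign-extended to
+    64 bits, which a plain 64-bit sar would otherwise miss */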
+ tcg_gen_ext_i32_i64(cpu_dst, cpu_dst);
tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
}
}
}
#else
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
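+ /* WIM bits for windows the CPU does not implement must read as
+    zero, so mask the stored value unless all 32 windows exist */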
+ if (dc->def->nwindows != 32)
+ tcg_gen_andi_tl(cpu_tmp32, cpu_tmp32,
+ (1 << dc->def->nwindows) - 1);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, wim));
#endif
goto jmp_insn;
#endif
case 0x3b: /* flush */
- if (!((dc)->features & CPU_FEATURE_FLUSH))
+ if (!((dc)->def->features & CPU_FEATURE_FLUSH))
goto unimp_flush;
tcg_gen_helper_0_1(helper_flush, cpu_dst);
break;
last_pc = dc->pc;
dc->npc = (target_ulong) tb->cs_base;
dc->mem_idx = cpu_mmu_index(env);
- dc->features = env->features;
- if ((dc->features & CPU_FEATURE_FLOAT)) {
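+ /* keep a pointer to the CPU model definition so feature bits and
+    nwindows can be consulted during translation */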
+ dc->def = env->def;
+ if ((dc->def->features & CPU_FEATURE_FLOAT))
dc->fpu_enabled = cpu_fpu_enabled(env);
-#if defined(CONFIG_USER_ONLY)
- dc->features |= CPU_FEATURE_FLOAT128;
-#endif
- } else
+ else
dc->fpu_enabled = 0;
#ifdef TARGET_SPARC64
dc->address_mask_32bit = env->pstate & PS_AM;