tcg_temp_free_i64(arg1); \
} while (0)
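+/* Call a helper that computes a 64-bit result from two 32-bit operands via
+   cpu_env and split it into the register pair rl (low word) / rh (high
+   word). */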
+#define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do { \
+ TCGv_i64 ret = tcg_temp_new_i64(); \
+ \
+ gen_helper_##name(ret, cpu_env, arg1, arg2); \
+ tcg_gen_extr_i64_i32(rl, rh, ret); \
+ \
+ tcg_temp_free_i64(ret); \
+} while (0)
+
#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
((offset & 0x0fffff) << 1))
tcg_temp_free(temp);
}
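+/* CMPSWAP.W: conditionally swap a word in memory with D[a]:
+       tmp   = M(EA)
+       M(EA) = (D[a+1] == tmp) ? D[a] : tmp
+       D[a]  = tmp */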
+static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp2, cpu_gpr_d[reg+1], temp,
+ cpu_gpr_d[reg], temp);
+ tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
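+/* SWAPMSK.W: swap memory with D[a] under the bit mask held in D[a+1]:
+       tmp   = M(EA)
+       M(EA) = (D[a] & D[a+1]) | (tmp & ~D[a+1])
+       D[a]  = tmp */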
+static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv temp3 = tcg_temp_new();
+
+ tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_and_tl(temp2, cpu_gpr_d[reg], cpu_gpr_d[reg+1]);
+ tcg_gen_andc_tl(temp3, temp, cpu_gpr_d[reg+1]);
+ tcg_gen_or_tl(temp2, temp2, temp3);
+ tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+}
+
/* We generate loads and stores to core special function registers (CSFRs)
   through the functions gen_mfcr and gen_mtcr. To handle access permissions,
   we use 3 macros R, A and E, which allow read-only, all, and ENDINIT-protected
   access.
tcg_gen_xor_i64(t1, result, r1);
tcg_gen_xor_i64(t0, r1, r2);
tcg_gen_andc_i64(t1, t1, t0);
- tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t1, 32);
+ tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
/* calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* calc AV/SAV bits */
- tcg_gen_trunc_shr_i64_i32(temp, result, 32);
+ tcg_gen_extrh_i64_i32(temp, result);
tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
/* calc SAV */
tcg_gen_mul_i64(t1, t1, t3);
tcg_gen_add_i64(t1, t2, t1);
- tcg_gen_trunc_i64_i32(ret, t1);
+ tcg_gen_extrl_i64_i32(ret, t1);
/* calc V
t1 > 0x7fffffff */
tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
/* t1 < -0x80000000 */
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
tcg_gen_or_i64(t2, t2, t3);
- tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* only the add overflows, if t2 < t1
calc V bit */
tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1);
- tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
tcg_gen_sari_i64(t2, t2, up_shift);
tcg_gen_add_i64(t3, t1, t2);
- tcg_gen_trunc_i64_i32(temp3, t3);
+ tcg_gen_extrl_i64_i32(temp3, t3);
/* calc v bit */
tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
tcg_gen_or_i64(t1, t1, t2);
- tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* We produce an overflow on the host if the mul before was
(0x80000000 * 0x80000000) << 1). If this is the
tcg_gen_xor_i64(t3, t4, t1);
tcg_gen_xor_i64(t2, t1, t2);
tcg_gen_andc_i64(t3, t3, t2);
- tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t3, 32);
+ tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
/* We produce an overflow on the host if the mul before was
(0x80000000 * 0x80000000) << 1). If this is the
case, we negate the ovf. */
tcg_gen_mul_i64(t1, t1, t3);
tcg_gen_sub_i64(t1, t2, t1);
- tcg_gen_trunc_i64_i32(ret, t1);
+ tcg_gen_extrl_i64_i32(ret, t1);
/* calc V
t2 > 0x7fffffff */
tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
/* result < -0x80000000 */
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
tcg_gen_or_i64(t2, t2, t3);
- tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
tcg_gen_extr_i64_i32(ret_low, ret_high, t3);
/* calc V bit, only the sub can overflow, if t1 > t2 */
tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2);
- tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* Calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
tcg_gen_xor_i64(t1, result, r1);
tcg_gen_xor_i64(t0, r1, r2);
tcg_gen_and_i64(t1, t1, t0);
- tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t1, 32);
+ tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
/* calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* calc AV/SAV bits */
- tcg_gen_trunc_shr_i64_i32(temp, result, 32);
+ tcg_gen_extrh_i64_i32(temp, result);
tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
/* calc SAV */
tcg_gen_add_i64(t2, t2, t4);
tcg_gen_sub_i64(t3, t1, t2);
- tcg_gen_trunc_i64_i32(temp3, t3);
+ tcg_gen_extrl_i64_i32(temp3, t3);
/* calc v bit */
tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
tcg_gen_or_i64(t1, t1, t2);
- tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
- /* We produce an overflow on the host if the mul before was
- (0x80000000 * 0x80000000) << 1). If this is the
- case, we negate the ovf. */
- if (n == 1) {
- tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
- tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
- tcg_gen_and_tl(temp, temp, temp2);
- tcg_gen_shli_tl(temp, temp, 31);
- /* negate v bit, if special condition */
- tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
- }
/* Calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* Calc AV/SAV bits */
tcg_gen_xor_i64(t3, t4, t1);
tcg_gen_xor_i64(t2, t1, t2);
tcg_gen_and_i64(t3, t3, t2);
- tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t3, 32);
+ tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
/* We produce an overflow on the host if the mul before was
(0x80000000 * 0x80000000) << 1). If this is the
case, we negate the ovf. */
gen_goto_tb(ctx, 0, ctx->next_pc);
}
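+/* FCALL is a fast call: unlike CALL it saves only the return address A[11]
+   on the stack instead of the whole upper context:
+       M(A[10] - 4) = A[11];  A[11] = next_pc;  A[10] -= 4 */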
+static void gen_fcall_save_ctx(DisasContext *ctx)
+{
+ TCGv temp = tcg_temp_new();
+
+ tcg_gen_addi_tl(temp, cpu_gpr_a[10], -4);
+ tcg_gen_qemu_st_tl(cpu_gpr_a[11], temp, ctx->mem_idx, MO_LESL);
+ tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc);
+ tcg_gen_mov_tl(cpu_gpr_a[10], temp);
+
+ tcg_temp_free(temp);
+}
+
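+/* FRET is the fast return matching FCALL: jump to the current A[11] with
+   bit 0 cleared, then pop the caller's A[11] back off the stack:
+       PC = A[11] & ~1;  A[11] = M(A[10]);  A[10] += 4 */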
+static void gen_fret(DisasContext *ctx)
+{
+ TCGv temp = tcg_temp_new();
+
+ tcg_gen_andi_tl(temp, cpu_gpr_a[11], ~0x1);
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[11], cpu_gpr_a[10], ctx->mem_idx, MO_LESL);
+ tcg_gen_addi_tl(cpu_gpr_a[10], cpu_gpr_a[10], 4);
+ tcg_gen_mov_tl(cpu_PC, temp);
+ tcg_gen_exit_tb(0);
+ ctx->bstate = BS_BRANCH;
+
+ tcg_temp_free(temp);
+}
+
static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
int r2 , int32_t constant , int32_t offset)
{
gen_helper_1arg(call, ctx->next_pc);
gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
break;
+ case OPC1_32_B_FCALL:
+ gen_fcall_save_ctx(ctx);
+ gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
+ break;
+ case OPC1_32_B_FCALLA:
+ gen_fcall_save_ctx(ctx);
+ gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
+ break;
case OPC1_32_B_JLA:
tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc);
/* fall through */
* Functions for decoding instructions
*/
-static void decode_src_opc(DisasContext *ctx, int op1)
+static void decode_src_opc(CPUTriCoreState *env, DisasContext *ctx, int op1)
{
int r1;
int32_t const4;
const4 = MASK_OP_SRC_CONST4(ctx->opcode);
tcg_gen_movi_tl(cpu_gpr_a[r1], const4);
break;
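+ /* MOV.E (1.6 ISA only): sign-extend const4 into the register pair
+    E[a], i.e. D[a] = const4 and D[a+1] = its sign bits */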
+ case OPC1_16_SRC_MOV_E:
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ tcg_gen_movi_tl(cpu_gpr_d[r1], const4);
+ tcg_gen_sari_tl(cpu_gpr_d[r1+1], cpu_gpr_d[r1], 31);
+ } /* TODO: else raise illegal opcode trap */
+ break;
case OPC1_16_SRC_SH:
gen_shi(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
break;
case OPC2_16_SR_DEBUG:
/* raise EXCP_DEBUG */
break;
+ case OPC2_16_SR_FRET:
+ gen_fret(ctx);
+ break;
}
}
case OPC1_16_SRC_LT:
case OPC1_16_SRC_MOV:
case OPC1_16_SRC_MOV_A:
+ case OPC1_16_SRC_MOV_E:
case OPC1_16_SRC_SH:
case OPC1_16_SRC_SHA:
- decode_src_opc(ctx, op1);
+ decode_src_opc(env, ctx, op1);
break;
/* SRR-format */
case OPC1_16_SRR_ADD:
tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
gen_swap(ctx, r1, cpu_gpr_a[r2]);
break;
+ case OPC2_32_BO_CMPSWAP_W_SHORTOFF:
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_cmpswap(ctx, r1, temp);
+ break;
+ case OPC2_32_BO_CMPSWAP_W_POSTINC:
+ gen_cmpswap(ctx, r1, cpu_gpr_a[r2]);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_CMPSWAP_W_PREINC:
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_cmpswap(ctx, r1, cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_BO_SWAPMSK_W_SHORTOFF:
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_swapmsk(ctx, r1, temp);
+ break;
+ case OPC2_32_BO_SWAPMSK_W_POSTINC:
+ gen_swapmsk(ctx, r1, cpu_gpr_a[r2]);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_SWAPMSK_W_PREINC:
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_swapmsk(ctx, r1, cpu_gpr_a[r2]);
+ break;
}
tcg_temp_free(temp);
tcg_temp_free(temp2);
gen_swap(ctx, r1, temp2);
gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
break;
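+ /* same CMPSWAP.W/SWAPMSK.W operations as above, but with a
+    bit-reverse (BR) or circular (CIRC) update of the address
+    register pair afterwards */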
+ case OPC2_32_BO_CMPSWAP_W_BR:
+ gen_cmpswap(ctx, r1, temp2);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_CMPSWAP_W_CIRC:
+ gen_cmpswap(ctx, r1, temp2);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_SWAPMSK_W_BR:
+ gen_swapmsk(ctx, r1, temp2);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_SWAPMSK_W_CIRC:
+ gen_swapmsk(ctx, r1, temp2);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
}
+
tcg_temp_free(temp);
tcg_temp_free(temp2);
tcg_temp_free(temp3);
break;
case OPC1_32_BOL_ST_H_LONGOFF:
if (tricore_feature(env, TRICORE_FEATURE_16)) {
- gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LESW);
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LESW);
} else {
/* raise illegal opcode trap */
}
gen_helper_1arg(call, ctx->next_pc);
tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
break;
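+ /* FCALLI: fast indirect call; the target comes from A[a] with
+    bit 0 forced to zero (halfword alignment) */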
+ case OPC2_32_RR_FCALLI:
+ gen_fcall_save_ctx(ctx);
+ tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
+ break;
}
tcg_gen_exit_tb(0);
ctx->bstate = BS_BRANCH;
/* sv */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
/* write result */
- tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3);
tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 16);
+ tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3);
tcg_temp_free(temp);
tcg_temp_free(temp2);
tcg_temp_free(temp3);
case OPC2_32_RR_UNPACK:
gen_unpack(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
break;
+ case OPC2_32_RR_CRC32:
+ if (tricore_feature(env, TRICORE_FEATURE_161)) {
+ gen_helper_crc32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ } /* TODO: else raise illegal opcode trap */
+ break;
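+ /* DIV/DIV_U (1.6 ISA): 32/32-bit division; the helper returns the
+    result pair with the quotient in D[c] and the remainder in D[c+1] */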
+ case OPC2_32_RR_DIV:
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ GEN_HELPER_RR(divide, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ } /* TODO: else raise illegal opcode trap */
+ break;
+ case OPC2_32_RR_DIV_U:
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ GEN_HELPER_RR(divide_u, cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ } /* TODO: else raise illegal opcode trap */
+ break;
}
}
static void decode_sys_interrupts(CPUTriCoreState *env, DisasContext *ctx)
{
uint32_t op2;
+ uint32_t r1;
TCGLabel *l1;
TCGv tmp;
op2 = MASK_OP_SYS_OP2(ctx->opcode);
+ r1 = MASK_OP_SYS_S1D(ctx->opcode);
switch (op2) {
case OPC2_32_SYS_DEBUG:
case OPC2_32_SYS_RET:
gen_compute_branch(ctx, op2, 0, 0, 0, 0);
break;
+ case OPC2_32_SYS_FRET:
+ gen_fret(ctx);
+ break;
case OPC2_32_SYS_RFE:
gen_helper_rfe(cpu_env);
tcg_gen_exit_tb(0);
case OPC2_32_SYS_SVLCX:
gen_helper_svlcx(cpu_env);
break;
+ case OPC2_32_SYS_RESTORE:
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM ||
+ (ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_UM1) {
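+ /* RESTORE copies D[a] bit 0 into ICR.IE (bit 8); the mode check
+    above restricts it to supervisor and user-1 mode */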
+ tcg_gen_deposit_tl(cpu_ICR, cpu_ICR, cpu_gpr_d[r1], 8, 1);
+ } /* else raise privilege trap */
+ } /* else raise illegal opcode trap */
+ break;
case OPC2_32_SYS_TRAPSV:
/* TODO: raise sticky overflow trap */
break;
/* B-format */
case OPC1_32_B_CALL:
case OPC1_32_B_CALLA:
+ case OPC1_32_B_FCALL:
+ case OPC1_32_B_FCALLA:
case OPC1_32_B_J:
case OPC1_32_B_JA:
case OPC1_32_B_JL:
}
}
-static inline void
-gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
- int search_pc)
+void gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
{
+ TriCoreCPU *cpu = tricore_env_get_cpu(env);
CPUState *cs = CPU(cpu);
- CPUTriCoreState *env = &cpu->env;
DisasContext ctx;
target_ulong pc_start;
- int num_insns;
+ int num_insns, max_insns;
- if (search_pc) {
- qemu_log("search pc %d\n", search_pc);
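+ /* Bound the number of guest insns per TB: honour the icount limit in
+    tb->cflags, force a single insn when single-stepping, and never
+    exceed TCG_MAX_INSNS */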
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (singlestep) {
+ max_insns = 1;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
}
- num_insns = 0;
pc_start = tb->pc;
ctx.pc = pc_start;
ctx.saved_pc = -1;
ctx.tb = tb;
ctx.singlestep_enabled = cs->singlestep_enabled;
ctx.bstate = BS_NONE;
- ctx.mem_idx = cpu_mmu_index(env);
+ ctx.mem_idx = cpu_mmu_index(env, false);
tcg_clear_temp_count();
gen_tb_start(tb);
while (ctx.bstate == BS_NONE) {
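+ /* record the guest PC of this insn so restore_state_to_opc() below
+    can recover it when an exception unwinds mid-TB */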
+ tcg_gen_insn_start(ctx.pc);
+ num_insns++;
+
ctx.opcode = cpu_ldl_code(env, ctx.pc);
decode_opc(env, &ctx, 0);
- num_insns++;
-
- if (tcg_op_buf_full()) {
- gen_save_pc(ctx.next_pc);
- tcg_gen_exit_tb(0);
- break;
- }
- if (singlestep) {
+ if (num_insns >= max_insns || tcg_op_buf_full()) {
gen_save_pc(ctx.next_pc);
tcg_gen_exit_tb(0);
break;
}
gen_tb_end(tb, num_insns);
- if (search_pc) {
- printf("done_generating search pc\n");
- } else {
- tb->size = ctx.pc - pc_start;
- tb->icount = num_insns;
- }
+ tb->size = ctx.pc - pc_start;
+ tb->icount = num_insns;
+
if (tcg_check_temp_count()) {
printf("LEAK at %08x\n", env->PC);
}
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
qemu_log("IN: %s\n", lookup_symbol(pc_start));
- log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
+ log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
qemu_log("\n");
}
#endif
}
void
-gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
-{
- gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, false);
-}
-
-void
-gen_intermediate_code_pc(CPUTriCoreState *env, struct TranslationBlock *tb)
-{
- gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, true);
-}
-
-void
-restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb, int pc_pos)
+restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb,
+ target_ulong *data)
{
- env->PC = tcg_ctx.gen_opc_pc[pc_pos];
+ env->PC = data[0];
}
/*
*