return pc;
}
-void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
- int flags)
+void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
int i;
if (env->cc_op > 3) {
offsetof(CPUS390XState, fregs[i].d),
cpu_reg_names[i + 16]);
}
-
- /* register helpers */
-#define GEN_HELPER 2
-#include "helper.h"
}
static TCGv_i64 load_reg(int reg)
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
- TCGv_i64 tmp;
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ bool need_31 = !(s->tb->flags & FLAG_MASK_64);
- /* 31-bitify the immediate part; register contents are dealt with below */
- if (!(s->tb->flags & FLAG_MASK_64)) {
- d2 &= 0x7fffffffUL;
- }
+ /* Note that d2 is limited to 20 bits, signed. If we crop negative
+ displacements early we create larger immediate addends. */
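+ /* For example, cropping d2 = -4 here to 0x7ffffffc would replace a small
+ negative addend with a 31-bit constant; the final andi below already
+ handles the 31-bit wrap-around. */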
- if (x2) {
- if (d2) {
- tmp = tcg_const_i64(d2);
- tcg_gen_add_i64(tmp, tmp, regs[x2]);
- } else {
- tmp = load_reg(x2);
- }
- if (b2) {
- tcg_gen_add_i64(tmp, tmp, regs[b2]);
- }
+ /* Note that addi optimizes the imm==0 case. */
+ if (b2 && x2) {
+ tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
+ tcg_gen_addi_i64(tmp, tmp, d2);
} else if (b2) {
- if (d2) {
- tmp = tcg_const_i64(d2);
- tcg_gen_add_i64(tmp, tmp, regs[b2]);
- } else {
- tmp = load_reg(b2);
- }
+ tcg_gen_addi_i64(tmp, regs[b2], d2);
+ } else if (x2) {
+ tcg_gen_addi_i64(tmp, regs[x2], d2);
} else {
- tmp = tcg_const_i64(d2);
+ if (need_31) {
+ d2 &= 0x7fffffff;
+ need_31 = false;
+ }
+ tcg_gen_movi_i64(tmp, d2);
}
-
- /* 31-bit mode mask if there are values loaded from registers */
- if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
- tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
+ if (need_31) {
+ tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
}
return tmp;
}
/* Table of mask values to comparison codes, given a comparison as input.
- For a true comparison CC=3 will never be set, but we treat this
- conservatively for possible use when CC=3 indicates overflow. */
+ For such, CC=3 should not be possible. */
static const TCGCond ltgt_cond[16] = {
TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
- TCG_COND_GT, TCG_COND_NEVER, /* | | GT | x */
- TCG_COND_LT, TCG_COND_NEVER, /* | LT | | x */
- TCG_COND_NE, TCG_COND_NEVER, /* | LT | GT | x */
- TCG_COND_EQ, TCG_COND_NEVER, /* EQ | | | x */
- TCG_COND_GE, TCG_COND_NEVER, /* EQ | | GT | x */
- TCG_COND_LE, TCG_COND_NEVER, /* EQ | LT | | x */
+ TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
+ TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
+ TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
+ TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
+ TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
+ TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
};
/* Table of mask values to comparison codes, given a logic op as input.
For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
- /* | | x | x */
- TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
- /* | NE | x | x */
- TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
- /* EQ | | x | x */
- TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
- /* EQ | NE | x | x */
- TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
+ TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
+ TCG_COND_NEVER, TCG_COND_NEVER,
+ TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
+ TCG_COND_NE, TCG_COND_NE,
+ TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
+ TCG_COND_EQ, TCG_COND_EQ,
+ TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
+ TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
account_inline_branch(s, old_cc_op);
break;
+ case CC_OP_ADDU_32:
+ case CC_OP_ADDU_64:
+ switch (mask) {
+ case 8 | 2: /* vr == 0 */
+ cond = TCG_COND_EQ;
+ break;
+ case 4 | 1: /* vr != 0 */
+ cond = TCG_COND_NE;
+ break;
+ case 8 | 4: /* no carry -> vr >= src */
+ cond = TCG_COND_GEU;
+ break;
+ case 2 | 1: /* carry -> vr < src */
+ cond = TCG_COND_LTU;
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s, old_cc_op);
+ break;
+
+ case CC_OP_SUBU_32:
+ case CC_OP_SUBU_64:
+ /* Note that CC=0 is impossible; treat it as don't-care. */
+ switch (mask & 7) {
+ case 2: /* zero -> op1 == op2 */
+ cond = TCG_COND_EQ;
+ break;
+ case 4 | 1: /* !zero -> op1 != op2 */
+ cond = TCG_COND_NE;
+ break;
+ case 4: /* borrow (!carry) -> op1 < op2 */
+ cond = TCG_COND_LTU;
+ break;
+ case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
+ cond = TCG_COND_GEU;
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s, old_cc_op);
+ break;
+
default:
do_dynamic:
/* Calculate cc value. */
break;
case CC_OP_LTGT_32:
case CC_OP_LTUGTU_32:
+ case CC_OP_SUBU_32:
c->is_64 = false;
c->u.s32.a = tcg_temp_new_i32();
tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
break;
case CC_OP_LTGT_64:
case CC_OP_LTUGTU_64:
+ case CC_OP_SUBU_64:
c->u.s64.a = cc_src;
c->u.s64.b = cc_dst;
c->g1 = c->g2 = true;
tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
break;
+ case CC_OP_ADDU_32:
+ c->is_64 = false;
+ c->u.s32.a = tcg_temp_new_i32();
+ c->u.s32.b = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
+ if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
+ tcg_gen_movi_i32(c->u.s32.b, 0);
+ } else {
+ tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
+ }
+ break;
+
+ case CC_OP_ADDU_64:
+ c->u.s64.a = cc_vr;
+ c->g1 = true;
+ if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
+ c->u.s64.b = tcg_const_i64(0);
+ } else {
+ c->u.s64.b = cc_src;
+ c->g2 = true;
+ }
+ break;
+
case CC_OP_STATIC:
c->is_64 = false;
c->u.s32.a = cc_op;
#define SPEC_r1_even 1
#define SPEC_r2_even 2
-#define SPEC_r1_f128 4
-#define SPEC_r2_f128 8
+#define SPEC_r3_even 4
+#define SPEC_r1_f128 8
+#define SPEC_r2_f128 16
/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
struct DisasInsn {
unsigned opc:16;
- DisasFormat fmt:6;
- DisasFacility fac:6;
- unsigned spec:4;
+ DisasFormat fmt:8;
+ DisasFacility fac:8;
+ unsigned spec:8;
const char *name;
};
/* ====================================================================== */
-/* Miscelaneous helpers, used by several operations. */
+/* Miscellaneous helpers, used by several operations. */
static void help_l2_shift(DisasContext *s, DisasFields *f,
DisasOps *o, int mask)
update_cc_op(s);
tcg_gen_goto_tb(0);
tcg_gen_movi_i64(psw_addr, dest);
- tcg_gen_exit_tb((tcg_target_long)s->tb);
+ tcg_gen_exit_tb((uintptr_t)s->tb);
return EXIT_GOTO_TB;
} else {
tcg_gen_movi_i64(psw_addr, dest);
/* Branch not taken. */
tcg_gen_goto_tb(0);
tcg_gen_movi_i64(psw_addr, s->next_pc);
- tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
+ tcg_gen_exit_tb((uintptr_t)s->tb + 0);
/* Branch taken. */
gen_set_label(lab);
tcg_gen_goto_tb(1);
tcg_gen_movi_i64(psw_addr, dest);
- tcg_gen_exit_tb((tcg_target_long)s->tb + 1);
+ tcg_gen_exit_tb((uintptr_t)s->tb + 1);
ret = EXIT_GOTO_TB;
} else {
update_cc_op(s);
tcg_gen_goto_tb(0);
tcg_gen_movi_i64(psw_addr, s->next_pc);
- tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
+ tcg_gen_exit_tb((uintptr_t)s->tb + 0);
gen_set_label(lab);
if (is_imm) {
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
- TCGv_i64 cc;
+ DisasCompare cmp;
+ TCGv_i64 carry;
tcg_gen_add_i64(o->out, o->in1, o->in2);
- /* XXX possible optimization point */
- gen_op_calc_cc(s);
- cc = tcg_temp_new_i64();
- tcg_gen_extu_i32_i64(cc, cc_op);
- tcg_gen_shri_i64(cc, cc, 1);
+ /* The carry flag is the msb of CC, therefore the branch mask that would
+ create that comparison is 3. Feeding the generated comparison to
+ setcond produces the carry flag that we desire. */
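+ /* (For CC_OP_ADDU_*, CC=2 and CC=3 are the carry cases, so branch
+ mask 2|1 = 3 selects exactly "carry set", per the switch above.) */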
+ disas_jcc(s, &cmp, 3);
+ carry = tcg_temp_new_i64();
+ if (cmp.is_64) {
+ tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
+ } else {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
+ tcg_gen_extu_i32_i64(carry, t);
+ tcg_temp_free_i32(t);
+ }
+ free_compare(&cmp);
- tcg_gen_add_i64(o->out, o->out, cc);
- tcg_temp_free_i64(cc);
+ tcg_gen_add_i64(o->out, o->out, carry);
+ tcg_temp_free_i64(carry);
return NO_EXIT;
}
bool is_imm;
DisasCompare c;
- /* Bit 3 of the m3 field is reserved and should be zero.
- Choose to ignore it wrt the ltgt_cond table above. */
- c.cond = ltgt_cond[m3 & 14];
+ c.cond = ltgt_cond[m3];
if (s->insn->data) {
c.cond = tcg_unsigned_cond(c.cond);
}
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
- int r3 = get_field(s->fields, r3);
- potential_page_fault(s);
- gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
+ /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
+ int d2 = get_field(s->fields, d2);
+ int b2 = get_field(s->fields, b2);
+ int is_64 = s->insn->data;
+ TCGv_i64 addr, mem, cc, z;
+
+ /* Note that in1 = R3 (new value) and
+ in2 = (zero-extended) R1 (expected value). */
+
+ /* Load the memory into the (temporary) output. While the PoO (Principles
+ of Operation) only specifies moving the memory to R1 on inequality,
+ copying it on equality as well leaves R1 equal to the memory operand
+ in all cases. */
+ addr = get_address(s, 0, b2, d2);
+ if (is_64) {
+ tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
+ } else {
+ tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
+ }
+
+ /* Are the memory and expected values (un)equal? Note that this setcond
+ produces the output CC value, thus the NE sense of the test. */
+ cc = tcg_temp_new_i64();
+ tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
+
+ /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
+ Recall that we are allowed to unconditionally issue the store (and
+ thus any possible write trap), so (re-)store the original contents
+ of MEM in case of inequality. */
+ z = tcg_const_i64(0);
+ mem = tcg_temp_new_i64();
+ tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
+ if (is_64) {
+ tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
+ } else {
+ tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
+ }
+ tcg_temp_free_i64(z);
+ tcg_temp_free_i64(mem);
+ tcg_temp_free_i64(addr);
+
+ /* Store CC back to cc_op. Wait until after the store so that any
+ exception gets the old cc_op value. */
+ tcg_gen_trunc_i64_i32(cc_op, cc);
+ tcg_temp_free_i64(cc);
set_cc_static(s);
return NO_EXIT;
}
-static ExitStatus op_csg(DisasContext *s, DisasOps *o)
+static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
+ /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
+ int r1 = get_field(s->fields, r1);
int r3 = get_field(s->fields, r3);
- potential_page_fault(s);
- gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
+ int d2 = get_field(s->fields, d2);
+ int b2 = get_field(s->fields, b2);
+ TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
+
+ /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
+
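+ /* The second operand spans two consecutive doublewords at D2(B2). */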
+ addrh = get_address(s, 0, b2, d2);
+ addrl = get_address(s, 0, b2, d2 + 8);
+ outh = tcg_temp_new_i64();
+ outl = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
+ tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
+
+ /* Fold the double-word compare with arithmetic. */
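+ /* That is, cc = ((outh ^ R1) | (outl ^ R1+1)) != 0, i.e. 1 iff the
+ pairs differ. */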
+ cc = tcg_temp_new_i64();
+ z = tcg_temp_new_i64();
+ tcg_gen_xor_i64(cc, outh, regs[r1]);
+ tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
+ tcg_gen_or_i64(cc, cc, z);
+ tcg_gen_movi_i64(z, 0);
+ tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
+
+ memh = tcg_temp_new_i64();
+ meml = tcg_temp_new_i64();
+ tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
+ tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
+ tcg_temp_free_i64(z);
+
+ tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
+ tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
+ tcg_temp_free_i64(memh);
+ tcg_temp_free_i64(meml);
+ tcg_temp_free_i64(addrh);
+ tcg_temp_free_i64(addrl);
+
+ /* Save back state now that we've passed all exceptions. */
+ tcg_gen_mov_i64(regs[r1], outh);
+ tcg_gen_mov_i64(regs[r1 + 1], outl);
+ tcg_gen_trunc_i64_i32(cc_op, cc);
+ tcg_temp_free_i64(outh);
+ tcg_temp_free_i64(outl);
+ tcg_temp_free_i64(cc);
set_cc_static(s);
return NO_EXIT;
}
}
#endif
-static ExitStatus op_cds(DisasContext *s, DisasOps *o)
-{
- int r3 = get_field(s->fields, r3);
- TCGv_i64 in3 = tcg_temp_new_i64();
- tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
- potential_page_fault(s);
- gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
- tcg_temp_free_i64(in3);
- set_cc_static(s);
- return NO_EXIT;
-}
-
-static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
-{
- TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
- potential_page_fault(s);
- /* XXX rewrite in tcg */
- gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
- set_cc_static(s);
- return NO_EXIT;
-}
-
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i32 t;
TCGCond c;
- /* Bit 3 of the m3 field is reserved and should be zero.
- Choose to ignore it wrt the ltgt_cond table above. */
- c = tcg_invert_cond(ltgt_cond[m3 & 14]);
+ c = tcg_invert_cond(ltgt_cond[m3]);
if (s->insn->data) {
c = tcg_unsigned_cond(c);
}
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
- gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
- return_low128(o->out2);
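+ /* out = high half, out2 = low half of the unsigned 128-bit product. */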
+ tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
return NO_EXIT;
}
break;
case 0xb9: /* SRNMT */
pos = 4, len = 3;
+ break;
default:
tcg_abort();
}
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
- TCGv_i64 cc;
+ DisasCompare cmp;
+ TCGv_i64 borrow;
- assert(!o->g_in2);
- tcg_gen_not_i64(o->in2, o->in2);
- tcg_gen_add_i64(o->out, o->in1, o->in2);
+ tcg_gen_sub_i64(o->out, o->in1, o->in2);
- /* XXX possible optimization point */
- gen_op_calc_cc(s);
- cc = tcg_temp_new_i64();
- tcg_gen_extu_i32_i64(cc, cc_op);
- tcg_gen_shri_i64(cc, cc, 1);
- tcg_gen_add_i64(o->out, o->out, cc);
- tcg_temp_free_i64(cc);
+ /* The !borrow flag is the msb of CC. Since we want the inverse of
+ that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
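+ /* (Since CC=0 cannot occur for SUBU, mask 8|4 reduces to CC=1, the
+ borrow case, per the switch above.) */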
+ disas_jcc(s, &cmp, 8 | 4);
+ borrow = tcg_temp_new_i64();
+ if (cmp.is_64) {
+ tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
+ } else {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
+ tcg_gen_extu_i32_i64(borrow, t);
+ tcg_temp_free_i32(t);
+ }
+ free_compare(&cmp);
+
+ tcg_gen_sub_i64(o->out, o->out, borrow);
+ tcg_temp_free_i64(borrow);
return NO_EXIT;
}
}
/* ====================================================================== */
-/* The "PREPeration" generators. These initialize the DisasOps.OUT fields
+/* The "PREParation" generators. These initialize the DisasOps.OUT fields
with the TCG register to which we will write. Used in combination with
the "wout" generators, in some cases we need a new temporary, and in
some cases we can write to a TCG global. */
}
#define SPEC_in1_r3_32u 0
+static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int r3 = get_field(f, r3);
+ o->in1 = tcg_temp_new_i64();
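+ /* Concatenate the even/odd register pair: R3 high, R3+1 low. */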
+ tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
+}
+#define SPEC_in1_r3_D32 SPEC_r3_even
+
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in2_r1_32u 0
+static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int r1 = get_field(f, r1);
+ o->in2 = tcg_temp_new_i64();
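+ /* Likewise: concatenate the pair with R1 high, R1+1 low. */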
+ tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
+}
+#define SPEC_in2_r1_D32 SPEC_r1_even
+
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
o->in2 = load_reg(get_field(f, r2));
excp = PGM_SPECIFICATION;
}
}
+ if (spec & SPEC_r3_even) {
+ r = get_field(&f, r3);
+ if (r & 1) {
+ excp = PGM_SPECIFICATION;
+ }
+ }
if (spec & SPEC_r1_f128) {
r = get_field(&f, r1);
if (r > 13) {
return ret;
}
-static inline void gen_intermediate_code_internal(CPUS390XState *env,
+static inline void gen_intermediate_code_internal(S390CPU *cpu,
TranslationBlock *tb,
- int search_pc)
+ bool search_pc)
{
+ CPUState *cs = CPU(cpu);
+ CPUS390XState *env = &cpu->env;
DisasContext dc;
target_ulong pc_start;
uint64_t next_page_start;
dc.tb = tb;
dc.pc = pc_start;
dc.cc_op = CC_OP_DYNAMIC;
- do_debug = dc.singlestep_enabled = env->singlestep_enabled;
+ do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
max_insns = CF_COUNT_MASK;
}
- gen_icount_start();
+ gen_tb_start();
do {
if (search_pc) {
|| tcg_ctx.gen_opc_ptr >= gen_opc_end
|| num_insns >= max_insns
|| singlestep
- || env->singlestep_enabled)) {
+ || cs->singlestep_enabled)) {
status = EXIT_PC_STALE;
}
} while (status == NO_EXIT);
abort();
}
- gen_icount_end(tb, num_insns);
+ gen_tb_end(tb, num_insns);
*tcg_ctx.gen_opc_ptr = INDEX_op_end;
if (search_pc) {
j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
- gen_intermediate_code_internal(env, tb, 0);
+ gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
- gen_intermediate_code_internal(env, tb, 1);
+ gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
}
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)