#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
+#include "exec/cpu_ldst.h"
/* global register indexes */
static TCGv_ptr cpu_env;
#include "exec/gen-icount.h"
-#include "helper.h"
-#define GEN_HELPER 1
-#include "helper.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
/* Information that (most) every instruction needs to manipulate. */
return pc;
}
-void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
- int flags)
+void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
int i;
if (env->cc_op > 3) {
offsetof(CPUS390XState, fregs[i].d),
cpu_reg_names[i + 16]);
}
-
- /* register helpers */
-#define GEN_HELPER 2
-#include "helper.h"
}
static TCGv_i64 load_reg(int reg)
return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
-static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
-{
- return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
-}
-
static int get_mem_index(DisasContext *s)
{
switch (s->tb->flags & FLAG_MASK_ASC) {
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
- TCGv_i64 tmp;
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ bool need_31 = !(s->tb->flags & FLAG_MASK_64);
- /* 31-bitify the immediate part; register contents are dealt with below */
- if (!(s->tb->flags & FLAG_MASK_64)) {
- d2 &= 0x7fffffffUL;
- }
+ /* Note that d2 is limited to 20 bits, signed. If we crop negative
+ displacements early, we create larger immediate addends. */
- if (x2) {
- if (d2) {
- tmp = tcg_const_i64(d2);
- tcg_gen_add_i64(tmp, tmp, regs[x2]);
- } else {
- tmp = load_reg(x2);
- }
- if (b2) {
- tcg_gen_add_i64(tmp, tmp, regs[b2]);
- }
+ /* Note that addi optimizes the imm==0 case. */
+ if (b2 && x2) {
+ tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
+ tcg_gen_addi_i64(tmp, tmp, d2);
} else if (b2) {
- if (d2) {
- tmp = tcg_const_i64(d2);
- tcg_gen_add_i64(tmp, tmp, regs[b2]);
- } else {
- tmp = load_reg(b2);
- }
+ tcg_gen_addi_i64(tmp, regs[b2], d2);
+ } else if (x2) {
+ tcg_gen_addi_i64(tmp, regs[x2], d2);
} else {
- tmp = tcg_const_i64(d2);
+ if (need_31) {
+ d2 &= 0x7fffffff;
+ need_31 = false;
+ }
+ tcg_gen_movi_i64(tmp, d2);
}
-
- /* 31-bit mode mask if there are values loaded from registers */
- if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
- tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
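+ /* In 31-bit mode, mask the address down to 31 bits after any register additions. */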
+ if (need_31) {
+ tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
}
return tmp;
#define SPEC_r1_even 1
#define SPEC_r2_even 2
-#define SPEC_r1_f128 4
-#define SPEC_r2_f128 8
+#define SPEC_r3_even 4
+#define SPEC_r1_f128 8
+#define SPEC_r2_f128 16
/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
struct DisasInsn {
unsigned opc:16;
- DisasFormat fmt:6;
- DisasFacility fac:6;
- unsigned spec:4;
+ DisasFormat fmt:8;
+ DisasFacility fac:8;
+ unsigned spec:8;
const char *name;
};
/* ====================================================================== */
-/* Miscelaneous helpers, used by several operations. */
+/* Miscellaneous helpers, used by several operations. */
static void help_l2_shift(DisasContext *s, DisasFields *f,
DisasOps *o, int mask)
update_cc_op(s);
tcg_gen_goto_tb(0);
tcg_gen_movi_i64(psw_addr, dest);
- tcg_gen_exit_tb((tcg_target_long)s->tb);
+ tcg_gen_exit_tb((uintptr_t)s->tb);
return EXIT_GOTO_TB;
} else {
tcg_gen_movi_i64(psw_addr, dest);
/* Branch not taken. */
tcg_gen_goto_tb(0);
tcg_gen_movi_i64(psw_addr, s->next_pc);
- tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
+ tcg_gen_exit_tb((uintptr_t)s->tb + 0);
/* Branch taken. */
gen_set_label(lab);
tcg_gen_goto_tb(1);
tcg_gen_movi_i64(psw_addr, dest);
- tcg_gen_exit_tb((tcg_target_long)s->tb + 1);
+ tcg_gen_exit_tb((uintptr_t)s->tb + 1);
ret = EXIT_GOTO_TB;
} else {
update_cc_op(s);
tcg_gen_goto_tb(0);
tcg_gen_movi_i64(psw_addr, s->next_pc);
- tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
+ tcg_gen_exit_tb((uintptr_t)s->tb + 0);
gen_set_label(lab);
if (is_imm) {
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
- TCGv_i64 cc;
+ DisasCompare cmp;
+ TCGv_i64 carry;
tcg_gen_add_i64(o->out, o->in1, o->in2);
- /* XXX possible optimization point */
- gen_op_calc_cc(s);
- cc = tcg_temp_new_i64();
- tcg_gen_extu_i32_i64(cc, cc_op);
- tcg_gen_shri_i64(cc, cc, 1);
+ /* The carry flag is the msb of CC, so it is set exactly when CC is 2 or 3;
+ the branch mask that selects those values is 3. Feeding the generated
+ comparison to setcond produces the carry flag that we desire. */
+ disas_jcc(s, &cmp, 3);
+ carry = tcg_temp_new_i64();
+ if (cmp.is_64) {
+ tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
+ } else {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
+ tcg_gen_extu_i32_i64(carry, t);
+ tcg_temp_free_i32(t);
+ }
+ free_compare(&cmp);
- tcg_gen_add_i64(o->out, o->out, cc);
- tcg_temp_free_i64(cc);
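+ /* carry is now 0 or 1 (the setcond result); adding it in completes the add with carry. */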
+ tcg_gen_add_i64(o->out, o->out, carry);
+ tcg_temp_free_i64(carry);
return NO_EXIT;
}
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
- int r3 = get_field(s->fields, r3);
- potential_page_fault(s);
- gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
+ /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
+ int d2 = get_field(s->fields, d2);
+ int b2 = get_field(s->fields, b2);
+ int is_64 = s->insn->data;
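+ /* The insn data field distinguishes the 32-bit and 64-bit forms. */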
+ TCGv_i64 addr, mem, cc, z;
+
+ /* Note that in1 = R3 (new value) and
+ in2 = (zero-extended) R1 (expected value). */
+
+ /* Load the memory into the (temporary) output. While the PoO only talks
+ about moving the memory to R1 on inequality, doing so on equality as well
+ leaves R1 equal to the memory operand in every case. */
+ addr = get_address(s, 0, b2, d2);
+ if (is_64) {
+ tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
+ } else {
+ tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
+ }
+
+ /* Are the memory and expected values (un)equal? Note that this setcond
+ produces the output CC value, thus the NE sense of the test. */
+ cc = tcg_temp_new_i64();
+ tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
+
+ /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
+ Recall that we are allowed to unconditionally issue the store (and
+ thus any possible write trap), so (re-)store the original contents
+ of MEM in case of inequality. */
+ z = tcg_const_i64(0);
+ mem = tcg_temp_new_i64();
+ tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
+ if (is_64) {
+ tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
+ } else {
+ tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
+ }
+ tcg_temp_free_i64(z);
+ tcg_temp_free_i64(mem);
+ tcg_temp_free_i64(addr);
+
+ /* Store CC back to cc_op. Wait until after the store so that any
+ exception gets the old cc_op value. */
+ tcg_gen_trunc_i64_i32(cc_op, cc);
+ tcg_temp_free_i64(cc);
set_cc_static(s);
return NO_EXIT;
}
-static ExitStatus op_csg(DisasContext *s, DisasOps *o)
+static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
+ /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
+ int r1 = get_field(s->fields, r1);
int r3 = get_field(s->fields, r3);
- potential_page_fault(s);
- gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
+ int d2 = get_field(s->fields, d2);
+ int b2 = get_field(s->fields, b2);
+ TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
+
+ /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
+
+ addrh = get_address(s, 0, b2, d2);
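+ /* The operand is a quadword; the second doubleword lies 8 bytes past the first. */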
+ addrl = get_address(s, 0, b2, d2 + 8);
+ outh = tcg_temp_new_i64();
+ outl = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
+ tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
+
+ /* Fold the double-word compare with arithmetic. */
+ cc = tcg_temp_new_i64();
+ z = tcg_temp_new_i64();
+ tcg_gen_xor_i64(cc, outh, regs[r1]);
+ tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
+ tcg_gen_or_i64(cc, cc, z);
+ tcg_gen_movi_i64(z, 0);
+ tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
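+ /* cc is now 0 iff both doublewords matched R1:R1+1, else 1. */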
+
+ memh = tcg_temp_new_i64();
+ meml = tcg_temp_new_i64();
+ tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
+ tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
+ tcg_temp_free_i64(z);
+
+ tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
+ tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
+ tcg_temp_free_i64(memh);
+ tcg_temp_free_i64(meml);
+ tcg_temp_free_i64(addrh);
+ tcg_temp_free_i64(addrl);
+
+ /* Save back state now that we've passed all exceptions. */
+ tcg_gen_mov_i64(regs[r1], outh);
+ tcg_gen_mov_i64(regs[r1 + 1], outl);
+ tcg_gen_trunc_i64_i32(cc_op, cc);
+ tcg_temp_free_i64(outh);
+ tcg_temp_free_i64(outl);
+ tcg_temp_free_i64(cc);
set_cc_static(s);
return NO_EXIT;
}
}
#endif
-static ExitStatus op_cds(DisasContext *s, DisasOps *o)
-{
- int r3 = get_field(s->fields, r3);
- TCGv_i64 in3 = tcg_temp_new_i64();
- tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
- potential_page_fault(s);
- gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
- tcg_temp_free_i64(in3);
- set_cc_static(s);
- return NO_EXIT;
-}
-
-static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
-{
- TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
- potential_page_fault(s);
- /* XXX rewrite in tcg */
- gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
- set_cc_static(s);
- return NO_EXIT;
-}
-
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
TCGv_i64 t1 = tcg_temp_new_i64();
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
- gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
- return_low128(o->out2);
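+ /* mulu2 computes the full 128-bit unsigned product: low doubleword into out2, high into out. */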
+ tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
return NO_EXIT;
}
/* Addressing mode has changed, so end the block. */
return EXIT_PC_STALE;
}
+
+static ExitStatus op_sam(DisasContext *s, DisasOps *o)
+{
+ int sam = s->insn->data;
+ TCGv_i64 tsam = tcg_const_i64(sam);
+
+ /* Overwrite PSW_MASK_64 and PSW_MASK_32 */
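+ /* sam bit 0 maps to PSW bit 31 (PSW_MASK_32), bit 1 to PSW bit 32 (PSW_MASK_64). */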
+ tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
+
+ tcg_temp_free_i64(tsam);
+ return EXIT_PC_STALE;
+}
#endif
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
break;
case 0xb9: /* SRNMT */
pos = 4, len = 3;
+ break;
default:
tcg_abort();
}
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
- TCGv_i64 cc;
+ DisasCompare cmp;
+ TCGv_i64 borrow;
- assert(!o->g_in2);
- tcg_gen_not_i64(o->in2, o->in2);
- tcg_gen_add_i64(o->out, o->in1, o->in2);
+ tcg_gen_sub_i64(o->out, o->in1, o->in2);
- /* XXX possible optimization point */
- gen_op_calc_cc(s);
- cc = tcg_temp_new_i64();
- tcg_gen_extu_i32_i64(cc, cc_op);
- tcg_gen_shri_i64(cc, cc, 1);
- tcg_gen_add_i64(o->out, o->out, cc);
- tcg_temp_free_i64(cc);
+ /* The !borrow flag is the msb of CC. Since we want the inverse of
+ that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
+ disas_jcc(s, &cmp, 8 | 4);
+ borrow = tcg_temp_new_i64();
+ if (cmp.is_64) {
+ tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
+ } else {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
+ tcg_gen_extu_i32_i64(borrow, t);
+ tcg_temp_free_i32(t);
+ }
+ free_compare(&cmp);
+
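+ /* borrow is 1 when CC was 0 or 1, i.e. a borrow occurred; subtract it from the difference. */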
+ tcg_gen_sub_i64(o->out, o->out, borrow);
+ tcg_temp_free_i64(borrow);
return NO_EXIT;
}
}
/* ====================================================================== */
-/* The "PREPeration" generators. These initialize the DisasOps.OUT fields
+/* The "PREParation" generators. These initialize the DisasOps.OUT fields
with the TCG register to which we will write. Used in combination with
the "wout" generators, in some cases we need a new temporary, and in
some cases we can write to a TCG global. */
}
#define SPEC_in1_r3_32u 0
+static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int r3 = get_field(f, r3);
+ o->in1 = tcg_temp_new_i64();
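+ /* Load the even/odd pair R3:R3+1 as one 64-bit value; R3 supplies the high half. */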
+ tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
+}
+#define SPEC_in1_r3_D32 SPEC_r3_even
+
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in2_r1_32u 0
+static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int r1 = get_field(f, r1);
+ o->in2 = tcg_temp_new_i64();
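+ /* Load the even/odd pair R1:R1+1 as one 64-bit value; R1 supplies the high half. */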
+ tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
+}
+#define SPEC_in2_r1_D32 SPEC_r1_even
+
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
o->in2 = load_reg(get_field(f, r2));
excp = PGM_SPECIFICATION;
}
}
+ if (spec & SPEC_r3_even) {
+ r = get_field(&f, r3);
+ if (r & 1) {
+ excp = PGM_SPECIFICATION;
+ }
+ }
if (spec & SPEC_r1_f128) {
r = get_field(&f, r1);
if (r > 13) {
return ret;
}
-static inline void gen_intermediate_code_internal(CPUS390XState *env,
+static inline void gen_intermediate_code_internal(S390CPU *cpu,
TranslationBlock *tb,
- int search_pc)
+ bool search_pc)
{
+ CPUState *cs = CPU(cpu);
+ CPUS390XState *env = &cpu->env;
DisasContext dc;
target_ulong pc_start;
uint64_t next_page_start;
dc.tb = tb;
dc.pc = pc_start;
dc.cc_op = CC_OP_DYNAMIC;
- do_debug = dc.singlestep_enabled = env->singlestep_enabled;
+ do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
max_insns = CF_COUNT_MASK;
}
- gen_icount_start();
+ gen_tb_start();
do {
if (search_pc) {
}
status = NO_EXIT;
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc.pc) {
status = EXIT_PC_STALE;
do_debug = true;
|| tcg_ctx.gen_opc_ptr >= gen_opc_end
|| num_insns >= max_insns
|| singlestep
- || env->singlestep_enabled)) {
+ || cs->singlestep_enabled)) {
status = EXIT_PC_STALE;
}
} while (status == NO_EXIT);
abort();
}
- gen_icount_end(tb, num_insns);
+ gen_tb_end(tb, num_insns);
*tcg_ctx.gen_opc_ptr = INDEX_op_end;
if (search_pc) {
j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
- gen_intermediate_code_internal(env, tb, 0);
+ gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
- gen_intermediate_code_internal(env, tb, 1);
+ gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
}
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)