#include "../tcg-pool.c.inc"
#include "elf.h"
-/* ??? The translation blocks produced by TCG are generally small enough to
- be entirely reachable with a 16-bit displacement. Leaving the option for
- a 32-bit displacement here Just In Case. */
-#define USE_LONG_BRANCHES 0
-
#define TCG_CT_CONST_S16 0x100
#define TCG_CT_CONST_S32 0x200
#define TCG_CT_CONST_S33 0x400
/* A scratch register that may be used throughout the backend. */
#define TCG_TMP0 TCG_REG_R1
-/* A scratch register that holds a pointer to the beginning of the TB.
- We don't need this when we have pc-relative loads with the general
- instructions extension facility. */
-#define TCG_REG_TB TCG_REG_R12
-#define USE_REG_TB (!HAVE_FACILITY(GEN_INST_EXT))
-
#ifndef CONFIG_SOFTMMU
#define TCG_GUEST_BASE_REG TCG_REG_R13
#endif
}
for (i = 0; i < 4; i++) {
- tcg_target_long mask = 0xffffull << i*16;
+ tcg_target_long mask = 0xffffull << i * 16;
if ((uval & mask) == uval) {
- tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
+ tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i * 16);
return true;
}
}
}
/* load a register with an immediate value */
-static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
- tcg_target_long sval, bool in_prologue)
+static void tcg_out_movi(TCGContext *s, TCGType type,
+ TCGReg ret, tcg_target_long sval)
{
tcg_target_ulong uval;
}
/* Try all 48-bit insns that can load it in one go. */
- if (HAVE_FACILITY(EXT_IMM)) {
- if (sval == (int32_t)sval) {
- tcg_out_insn(s, RIL, LGFI, ret, sval);
- return;
- }
- if (uval <= 0xffffffff) {
- tcg_out_insn(s, RIL, LLILF, ret, uval);
- return;
- }
- if ((uval & 0xffffffff) == 0) {
- tcg_out_insn(s, RIL, LLIHF, ret, uval >> 32);
- return;
- }
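+ /* LGFI sign-extends a 32-bit immediate to 64 bits; LLILF and LLIHF
+ load an unsigned 32-bit value into the low/high word, zeroing the rest. */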
+ if (sval == (int32_t)sval) {
+ tcg_out_insn(s, RIL, LGFI, ret, sval);
+ return;
+ }
+ if (uval <= 0xffffffff) {
+ tcg_out_insn(s, RIL, LLILF, ret, uval);
+ return;
+ }
+ if ((uval & 0xffffffff) == 0) {
+ tcg_out_insn(s, RIL, LLIHF, ret, uval >> 32);
+ return;
}
/* Try for PC-relative address load. For odd addresses,
tcg_out_insn(s, RIL, LARL, ret, off);
return;
}
- } else if (USE_REG_TB && !in_prologue) {
- ptrdiff_t off = tcg_tbrel_diff(s, (void *)sval);
- if (off == sextract64(off, 0, 20)) {
- /* This is certain to be an address within TB, and therefore
- OFF will be negative; don't try RX_LA. */
- tcg_out_insn(s, RXY, LAY, ret, TCG_REG_TB, TCG_REG_NONE, off);
- return;
- }
- }
-
- /* A 32-bit unsigned value can be loaded in 2 insns. And given
- that LLILL, LLIHL, LLILF above did not succeed, we know that
- both insns are required. */
- if (uval <= 0xffffffff) {
- tcg_out_insn(s, RI, LLILL, ret, uval);
- tcg_out_insn(s, RI, IILH, ret, uval >> 16);
- return;
}
/* Otherwise, stuff it in the constant pool. */
- if (HAVE_FACILITY(GEN_INST_EXT)) {
- tcg_out_insn(s, RIL, LGRL, ret, 0);
- new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
- } else if (USE_REG_TB && !in_prologue) {
- tcg_out_insn(s, RXY, LG, ret, TCG_REG_TB, TCG_REG_NONE, 0);
- new_pool_label(s, sval, R_390_20, s->code_ptr - 2,
- tcg_tbrel_diff(s, NULL));
- } else {
- TCGReg base = ret ? ret : TCG_TMP0;
- tcg_out_insn(s, RIL, LARL, base, 0);
- new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
- tcg_out_insn(s, RXY, LG, ret, base, TCG_REG_NONE, 0);
- }
-}
-
-static void tcg_out_movi(TCGContext *s, TCGType type,
- TCGReg ret, tcg_target_long sval)
-{
- tcg_out_movi_int(s, type, ret, sval, false);
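+ /* LGRL performs a PC-relative 64-bit load; the PC32DBL relocation
+ patches in the displacement of the pool entry. */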
+ tcg_out_insn(s, RIL, LGRL, ret, 0);
+ new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
}
/* Emit a load/store type instruction. Inputs are:
return false;
}
-/* load data from an absolute host address */
-static void tcg_out_ld_abs(TCGContext *s, TCGType type,
- TCGReg dest, const void *abs)
-{
- intptr_t addr = (intptr_t)abs;
-
- if (HAVE_FACILITY(GEN_INST_EXT) && !(addr & 1)) {
- ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
- if (disp == (int32_t)disp) {
- if (type == TCG_TYPE_I32) {
- tcg_out_insn(s, RIL, LRL, dest, disp);
- } else {
- tcg_out_insn(s, RIL, LGRL, dest, disp);
- }
- return;
- }
- }
- if (USE_REG_TB) {
- ptrdiff_t disp = tcg_tbrel_diff(s, abs);
- if (disp == sextract64(disp, 0, 20)) {
- tcg_out_ld(s, type, dest, TCG_REG_TB, disp);
- return;
- }
- }
-
- tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
- tcg_out_ld(s, type, dest, dest, addr & 0xffff);
-}
-
static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
int msb, int lsb, int ofs, int z)
{
static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
- if (HAVE_FACILITY(EXT_IMM)) {
- tcg_out_insn(s, RRE, LGBR, dest, src);
- return;
- }
-
- if (type == TCG_TYPE_I32) {
- if (dest == src) {
- tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
- } else {
- tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
- }
- tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
- } else {
- tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
- tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
- }
+ tcg_out_insn(s, RRE, LGBR, dest, src);
}
static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
- if (HAVE_FACILITY(EXT_IMM)) {
- tcg_out_insn(s, RRE, LLGCR, dest, src);
- return;
- }
-
- if (dest == src) {
- tcg_out_movi(s, type, TCG_TMP0, 0xff);
- src = TCG_TMP0;
- } else {
- tcg_out_movi(s, type, dest, 0xff);
- }
- if (type == TCG_TYPE_I32) {
- tcg_out_insn(s, RR, NR, dest, src);
- } else {
- tcg_out_insn(s, RRE, NGR, dest, src);
- }
+ tcg_out_insn(s, RRE, LLGCR, dest, src);
}
static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
- if (HAVE_FACILITY(EXT_IMM)) {
- tcg_out_insn(s, RRE, LGHR, dest, src);
- return;
- }
-
- if (type == TCG_TYPE_I32) {
- if (dest == src) {
- tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
- } else {
- tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
- }
- tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
- } else {
- tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
- tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
- }
+ tcg_out_insn(s, RRE, LGHR, dest, src);
}
static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
- if (HAVE_FACILITY(EXT_IMM)) {
- tcg_out_insn(s, RRE, LLGHR, dest, src);
- return;
- }
-
- if (dest == src) {
- tcg_out_movi(s, type, TCG_TMP0, 0xffff);
- src = TCG_TMP0;
- } else {
- tcg_out_movi(s, type, dest, 0xffff);
- }
- if (type == TCG_TYPE_I32) {
- tcg_out_insn(s, RR, NR, dest, src);
- } else {
- tcg_out_insn(s, RRE, NGR, dest, src);
- }
+ tcg_out_insn(s, RRE, LLGHR, dest, src);
}
static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
tgen_ext32u(s, dest, dest);
return;
}
- if (HAVE_FACILITY(EXT_IMM)) {
- if ((val & valid) == 0xff) {
- tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
- return;
- }
- if ((val & valid) == 0xffff) {
- tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
- return;
- }
+ if ((val & valid) == 0xff) {
+ tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
+ return;
+ }
+ if ((val & valid) == 0xffff) {
+ tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
+ return;
}
/* Try all 32-bit insns that can perform it in one go. */
for (i = 0; i < 4; i++) {
- tcg_target_ulong mask = ~(0xffffull << i*16);
+ tcg_target_ulong mask = ~(0xffffull << i * 16);
if (((val | ~valid) & mask) == mask) {
- tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
+ tcg_out_insn_RI(s, ni_insns[i], dest, val >> i * 16);
return;
}
}
/* Try all 48-bit insns that can perform it in one go. */
- if (HAVE_FACILITY(EXT_IMM)) {
- for (i = 0; i < 2; i++) {
- tcg_target_ulong mask = ~(0xffffffffull << i*32);
- if (((val | ~valid) & mask) == mask) {
- tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
- return;
- }
+ for (i = 0; i < 2; i++) {
+ tcg_target_ulong mask = ~(0xffffffffull << i * 32);
+ if (((val | ~valid) & mask) == mask) {
+ tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i * 32);
+ return;
}
}
- if (HAVE_FACILITY(GEN_INST_EXT) && risbg_mask(val)) {
+ if (risbg_mask(val)) {
tgen_andi_risbg(s, dest, dest, val);
return;
}
- /* Use the constant pool if USE_REG_TB, but not for small constants. */
- if (USE_REG_TB) {
- if (!maybe_out_small_movi(s, type, TCG_TMP0, val)) {
- tcg_out_insn(s, RXY, NG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
- new_pool_label(s, val & valid, R_390_20, s->code_ptr - 2,
- tcg_tbrel_diff(s, NULL));
- return;
- }
- } else {
- tcg_out_movi(s, type, TCG_TMP0, val);
- }
+ tcg_out_movi(s, type, TCG_TMP0, val);
if (type == TCG_TYPE_I32) {
tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
} else {
/* Try all 32-bit insns that can perform it in one go. */
for (i = 0; i < 4; i++) {
- tcg_target_ulong mask = (0xffffull << i*16);
+ tcg_target_ulong mask = (0xffffull << i * 16);
if ((val & mask) != 0 && (val & ~mask) == 0) {
- tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
+ tcg_out_insn_RI(s, oi_insns[i], dest, val >> i * 16);
return;
}
}
/* Try all 48-bit insns that can perform it in one go. */
- if (HAVE_FACILITY(EXT_IMM)) {
- for (i = 0; i < 2; i++) {
- tcg_target_ulong mask = (0xffffffffull << i*32);
- if ((val & mask) != 0 && (val & ~mask) == 0) {
- tcg_out_insn_RIL(s, oif_insns[i], dest, val >> i*32);
- return;
- }
+ for (i = 0; i < 2; i++) {
+ tcg_target_ulong mask = (0xffffffffull << i * 32);
+ if ((val & mask) != 0 && (val & ~mask) == 0) {
+ tcg_out_insn_RIL(s, oif_insns[i], dest, val >> i * 32);
+ return;
}
}
- /* Use the constant pool if USE_REG_TB, but not for small constants. */
if (maybe_out_small_movi(s, type, TCG_TMP0, val)) {
if (type == TCG_TYPE_I32) {
tcg_out_insn(s, RR, OR, dest, TCG_TMP0);
} else {
tcg_out_insn(s, RRE, OGR, dest, TCG_TMP0);
}
- } else if (USE_REG_TB) {
- tcg_out_insn(s, RXY, OG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
- new_pool_label(s, val, R_390_20, s->code_ptr - 2,
- tcg_tbrel_diff(s, NULL));
} else {
/* Perform the OR via sequential modifications to the high and
low parts. Do this via recursion to handle 16-bit vs 32-bit
masks in each half. */
- tcg_debug_assert(HAVE_FACILITY(EXT_IMM));
tgen_ori(s, type, dest, val & 0x00000000ffffffffull);
tgen_ori(s, type, dest, val & 0xffffffff00000000ull);
}
static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
/* Try all 48-bit insns that can perform it in one go. */
- if (HAVE_FACILITY(EXT_IMM)) {
- if ((val & 0xffffffff00000000ull) == 0) {
- tcg_out_insn(s, RIL, XILF, dest, val);
- return;
- }
- if ((val & 0x00000000ffffffffull) == 0) {
- tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
- return;
- }
+ if ((val & 0xffffffff00000000ull) == 0) {
+ tcg_out_insn(s, RIL, XILF, dest, val);
+ return;
+ }
+ if ((val & 0x00000000ffffffffull) == 0) {
+ tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
+ return;
}
- /* Use the constant pool if USE_REG_TB, but not for small constants. */
if (maybe_out_small_movi(s, type, TCG_TMP0, val)) {
if (type == TCG_TYPE_I32) {
tcg_out_insn(s, RR, XR, dest, TCG_TMP0);
} else {
tcg_out_insn(s, RRE, XGR, dest, TCG_TMP0);
}
- } else if (USE_REG_TB) {
- tcg_out_insn(s, RXY, XG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
- new_pool_label(s, val, R_390_20, s->code_ptr - 2,
- tcg_tbrel_diff(s, NULL));
} else {
/* Perform the xor by parts. */
- tcg_debug_assert(HAVE_FACILITY(EXT_IMM));
if (val & 0xffffffff) {
tcg_out_insn(s, RIL, XILF, dest, val);
}
goto exit;
}
- if (HAVE_FACILITY(EXT_IMM)) {
- if (type == TCG_TYPE_I32) {
- op = (is_unsigned ? RIL_CLFI : RIL_CFI);
- tcg_out_insn_RIL(s, op, r1, c2);
- goto exit;
- } else if (c2 == (is_unsigned ? (TCGArg)(uint32_t)c2 : (TCGArg)(int32_t)c2)) {
- op = (is_unsigned ? RIL_CLGFI : RIL_CGFI);
- tcg_out_insn_RIL(s, op, r1, c2);
- goto exit;
- }
+ if (type == TCG_TYPE_I32) {
+ op = (is_unsigned ? RIL_CLFI : RIL_CFI);
+ tcg_out_insn_RIL(s, op, r1, c2);
+ goto exit;
+ }
+ if (c2 == (is_unsigned ? (TCGArg)(uint32_t)c2 : (TCGArg)(int32_t)c2)) {
+ op = (is_unsigned ? RIL_CLGFI : RIL_CGFI);
+ tcg_out_insn_RIL(s, op, r1, c2);
+ goto exit;
}
/* Use the constant pool, but not for small constants. */
if (maybe_out_small_movi(s, type, TCG_TMP0, c2)) {
c2 = TCG_TMP0;
/* fall through to reg-reg */
- } else if (USE_REG_TB) {
- if (type == TCG_TYPE_I32) {
- op = (is_unsigned ? RXY_CLY : RXY_CY);
- tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0);
- new_pool_label(s, (uint32_t)c2, R_390_20, s->code_ptr - 2,
- 4 - tcg_tbrel_diff(s, NULL));
- } else {
- op = (is_unsigned ? RXY_CLG : RXY_CG);
- tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0);
- new_pool_label(s, c2, R_390_20, s->code_ptr - 2,
- tcg_tbrel_diff(s, NULL));
- }
- goto exit;
} else {
- if (type == TCG_TYPE_I32) {
- op = (is_unsigned ? RIL_CLRL : RIL_CRL);
- tcg_out_insn_RIL(s, op, r1, 0);
- new_pool_label(s, (uint32_t)c2, R_390_PC32DBL,
- s->code_ptr - 2, 2 + 4);
- } else {
- op = (is_unsigned ? RIL_CLGRL : RIL_CGRL);
- tcg_out_insn_RIL(s, op, r1, 0);
- new_pool_label(s, c2, R_390_PC32DBL, s->code_ptr - 2, 2);
- }
+ op = (is_unsigned ? RIL_CLGRL : RIL_CGRL);
+ tcg_out_insn_RIL(s, op, r1, 0);
+ new_pool_label(s, c2, R_390_PC32DBL, s->code_ptr - 2, 2);
goto exit;
}
}
TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
{
int cc;
- bool have_loc;
/* With LOC2, we can always emit the minimum 3 insns. */
if (HAVE_FACILITY(LOAD_ON_COND2)) {
return;
}
- have_loc = HAVE_FACILITY(LOAD_ON_COND);
-
- /* For HAVE_LOC, only the paths through GTU/GT/LEU/LE are smaller. */
restart:
switch (cond) {
case TCG_COND_NE:
case TCG_COND_LT:
case TCG_COND_GE:
/* Swap operands so that we can use LEU/GTU/GT/LE. */
- if (c2const) {
- if (have_loc) {
- break;
- }
- tcg_out_movi(s, type, TCG_TMP0, c2);
- c2 = c1;
- c2const = 0;
- c1 = TCG_TMP0;
- } else {
+ if (!c2const) {
TCGReg t = c1;
c1 = c2;
c2 = t;
+ cond = tcg_swap_cond(cond);
+ goto restart;
}
- cond = tcg_swap_cond(cond);
- goto restart;
+ break;
default:
g_assert_not_reached();
}
cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
- if (have_loc) {
- /* Emit: d = 0, t = 1, d = (cc ? t : d). */
- tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
- tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
- tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc);
- } else {
- /* Emit: d = 1; if (cc) goto over; d = 0; over: */
- tcg_out_movi(s, type, dest, 1);
- tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
- tcg_out_movi(s, type, dest, 0);
- }
+ /* Emit: d = 0, t = 1, d = (cc ? t : d). */
+ tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
+ tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc);
}
static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
TCGReg c1, TCGArg c2, int c2const,
TCGArg v3, int v3const)
{
- int cc;
- if (HAVE_FACILITY(LOAD_ON_COND)) {
- cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
- if (v3const) {
- tcg_out_insn(s, RIE, LOCGHI, dest, v3, cc);
- } else {
- tcg_out_insn(s, RRF, LOCGR, dest, v3, cc);
- }
+ int cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
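+ /* LOCGR needs only the z196 baseline; the immediate form is reached
+ only when the constraints allowed a constant v3 (load/store-on-condition-2). */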
+ if (v3const) {
+ tcg_out_insn(s, RIE, LOCGHI, dest, v3, cc);
} else {
- c = tcg_invert_cond(c);
- cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
-
- /* Emit: if (cc) goto over; dest = r3; over: */
- tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
- tcg_out_insn(s, RRE, LGR, dest, v3);
+ tcg_out_insn(s, RRF, LOCGR, dest, v3, cc);
}
}
} else {
tcg_out_mov(s, TCG_TYPE_I64, dest, a2);
}
- if (HAVE_FACILITY(LOAD_ON_COND)) {
- /* Emit: if (one bit found) dest = r0. */
- tcg_out_insn(s, RRF, LOCGR, dest, TCG_REG_R0, 2);
- } else {
- /* Emit: if (no one bit found) goto over; dest = r0; over: */
- tcg_out_insn(s, RI, BRC, 8, (4 + 4) >> 1);
- tcg_out_insn(s, RRE, LGR, dest, TCG_REG_R0);
- }
+ /* Emit: if (one bit found) dest = r0. */
+ tcg_out_insn(s, RRF, LOCGR, dest, TCG_REG_R0, 2);
}
}
{
if (l->has_value) {
tgen_gotoi(s, cc, l->u.value_ptr);
- } else if (USE_LONG_BRANCHES) {
- tcg_out16(s, RIL_BRCL | (cc << 4));
- tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, 2);
- s->code_ptr += 2;
} else {
tcg_out16(s, RI_BRC | (cc << 4));
tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, 2);
TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
{
int cc;
+ bool is_unsigned = is_unsigned_cond(c);
+ bool in_range;
+ S390Opcode opc;
- if (HAVE_FACILITY(GEN_INST_EXT)) {
- bool is_unsigned = is_unsigned_cond(c);
- bool in_range;
- S390Opcode opc;
-
- cc = tcg_cond_to_s390_cond[c];
+ cc = tcg_cond_to_s390_cond[c];
- if (!c2const) {
- opc = (type == TCG_TYPE_I32
- ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
- : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
- tgen_compare_branch(s, opc, cc, r1, c2, l);
- return;
- }
+ if (!c2const) {
+ opc = (type == TCG_TYPE_I32
+ ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
+ : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
+ tgen_compare_branch(s, opc, cc, r1, c2, l);
+ return;
+ }
- /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
- If the immediate we've been given does not fit that range, we'll
- fall back to separate compare and branch instructions using the
- larger comparison range afforded by COMPARE IMMEDIATE. */
- if (type == TCG_TYPE_I32) {
- if (is_unsigned) {
- opc = RIE_CLIJ;
- in_range = (uint32_t)c2 == (uint8_t)c2;
- } else {
- opc = RIE_CIJ;
- in_range = (int32_t)c2 == (int8_t)c2;
- }
+ /*
+ * COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
+ * If the immediate we've been given does not fit that range, we'll
+ * fall back to separate compare and branch instructions using the
+ * larger comparison range afforded by COMPARE IMMEDIATE.
+ */
+ if (type == TCG_TYPE_I32) {
+ if (is_unsigned) {
+ opc = RIE_CLIJ;
+ in_range = (uint32_t)c2 == (uint8_t)c2;
} else {
- if (is_unsigned) {
- opc = RIE_CLGIJ;
- in_range = (uint64_t)c2 == (uint8_t)c2;
- } else {
- opc = RIE_CGIJ;
- in_range = (int64_t)c2 == (int8_t)c2;
- }
+ opc = RIE_CIJ;
+ in_range = (int32_t)c2 == (int8_t)c2;
}
- if (in_range) {
- tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
- return;
+ } else {
+ if (is_unsigned) {
+ opc = RIE_CLGIJ;
+ in_range = (uint64_t)c2 == (uint8_t)c2;
+ } else {
+ opc = RIE_CGIJ;
+ in_range = (int64_t)c2 == (int8_t)c2;
}
}
+ if (in_range) {
+ tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
+ return;
+ }
cc = tgen_cmp(s, type, c, r1, c2, c2const, false);
tgen_branch(s, cc, l);
}
-static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest)
+static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *dest)
{
ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
if (off == (int32_t)off) {
}
}
+static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
+ const TCGHelperInfo *info)
+{
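+ /* The helper ABI info is unused on s390x; emit a plain call. */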
+ tcg_out_call_int(s, dest);
+}
+
static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
TCGReg base, TCGReg index, int disp)
{
cross pages using the address of the last byte of the access. */
a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask);
tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
- if (HAVE_FACILITY(GEN_INST_EXT) && a_off == 0) {
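+ /* With no alignment displacement to add, a single RISBG applies the TLB mask. */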
+ if (a_off == 0) {
tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
} else {
tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
}
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi);
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr);
- tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
+ tcg_out_call_int(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
}
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi);
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
- tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
+ tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
return true;
case INDEX_op_goto_tb:
a0 = args[0];
- if (s->tb_jmp_insn_offset) {
- /*
- * branch displacement must be aligned for atomic patching;
- * see if we need to add extra nop before branch
- */
- if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
- tcg_out16(s, NOP);
- }
- tcg_debug_assert(!USE_REG_TB);
- tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
- s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
- s->code_ptr += 2;
- } else {
- /* load address stored at s->tb_jmp_target_addr + a0 */
- tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_REG_TB,
- tcg_splitwx_to_rx(s->tb_jmp_target_addr + a0));
- /* and go there */
- tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_TB);
- }
+ /*
+ * Branch displacement must be aligned for atomic patching;
+ * see if we need to add an extra nop before the branch.
+ */
+ if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
+ tcg_out16(s, NOP);
+ }
+ tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
+ s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+ s->code_ptr += 2;
set_jmp_reset_offset(s, a0);
-
- /* For the unlinked path of goto_tb, we need to reset
- TCG_REG_TB to the beginning of this TB. */
- if (USE_REG_TB) {
- int ofs = -tcg_current_code_size(s);
- /* All TB are restricted to 64KiB by unwind info. */
- tcg_debug_assert(ofs == sextract64(ofs, 0, 20));
- tcg_out_insn(s, RXY, LAY, TCG_REG_TB,
- TCG_REG_TB, TCG_REG_NONE, ofs);
- }
break;
case INDEX_op_goto_ptr:
a0 = args[0];
- if (USE_REG_TB) {
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0);
- }
tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
break;
tcg_out_insn(s, RI, AHI, a0, a2);
break;
}
- if (HAVE_FACILITY(EXT_IMM)) {
- tcg_out_insn(s, RIL, AFI, a0, a2);
- break;
- }
+ tcg_out_insn(s, RIL, AFI, a0, a2);
+ break;
}
tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
} else if (a0 == a1) {
break;
case INDEX_op_div2_i32:
- tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
+ tcg_debug_assert(args[0] == args[2]);
+ tcg_debug_assert(args[1] == args[3]);
+ tcg_debug_assert((args[1] & 1) == 0);
+ tcg_debug_assert(args[0] == args[1] + 1);
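+ /* DR: the even/odd pair args[1]:args[0] holds the 64-bit dividend;
+ the remainder lands in the even register, the quotient in the odd. */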
+ tcg_out_insn(s, RR, DR, args[1], args[4]);
break;
case INDEX_op_divu2_i32:
- tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
+ tcg_debug_assert(args[0] == args[2]);
+ tcg_debug_assert(args[1] == args[3]);
+ tcg_debug_assert((args[1] & 1) == 0);
+ tcg_debug_assert(args[0] == args[1] + 1);
+ tcg_out_insn(s, RRE, DLR, args[1], args[4]);
break;
case INDEX_op_shl_i32:
tcg_out_insn(s, RI, AGHI, a0, a2);
break;
}
- if (HAVE_FACILITY(EXT_IMM)) {
- if (a2 == (int32_t)a2) {
- tcg_out_insn(s, RIL, AGFI, a0, a2);
- break;
- } else if (a2 == (uint32_t)a2) {
- tcg_out_insn(s, RIL, ALGFI, a0, a2);
- break;
- } else if (-a2 == (uint32_t)-a2) {
- tcg_out_insn(s, RIL, SLGFI, a0, -a2);
- break;
- }
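+ /* AGFI takes a signed 32-bit immediate; ALGFI and SLGFI cover the
+ unsigned 32-bit add and subtract cases. */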
+ if (a2 == (int32_t)a2) {
+ tcg_out_insn(s, RIL, AGFI, a0, a2);
+ break;
+ }
+ if (a2 == (uint32_t)a2) {
+ tcg_out_insn(s, RIL, ALGFI, a0, a2);
+ break;
+ }
+ if (-a2 == (uint32_t)-a2) {
+ tcg_out_insn(s, RIL, SLGFI, a0, -a2);
+ break;
}
}
tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
if (const_args[2]) {
a2 = -a2;
goto do_addi_64;
- } else if (a0 == a1) {
- tcg_out_insn(s, RRE, SGR, a0, a2);
} else {
tcg_out_insn(s, RRF, SGRK, a0, a1, a2);
}
if (const_args[2]) {
tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
- } else if (a0 == a1) {
- tcg_out_insn(s, RRE, NGR, args[0], args[2]);
} else {
tcg_out_insn(s, RRF, NGRK, a0, a1, a2);
}
if (const_args[2]) {
tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
tgen_ori(s, TCG_TYPE_I64, a0, a2);
- } else if (a0 == a1) {
- tcg_out_insn(s, RRE, OGR, a0, a2);
} else {
tcg_out_insn(s, RRF, OGRK, a0, a1, a2);
}
if (const_args[2]) {
tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
tgen_xori(s, TCG_TYPE_I64, a0, a2);
- } else if (a0 == a1) {
- tcg_out_insn(s, RRE, XGR, a0, a2);
} else {
tcg_out_insn(s, RRF, XGRK, a0, a1, a2);
}
break;
case INDEX_op_div2_i64:
- /* ??? We get an unnecessary sign-extension of the dividend
- into R3 with this definition, but as we do in fact always
- produce both quotient and remainder using INDEX_op_div_i64
- instead requires jumping through even more hoops. */
- tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
+ /*
+ * ??? We get an unnecessary sign-extension of the dividend
+ * into op0 with this definition, but as we do in fact always
+ * produce both quotient and remainder, using INDEX_op_div_i64
+ * instead would require jumping through even more hoops.
+ */
+ tcg_debug_assert(args[0] == args[2]);
+ tcg_debug_assert(args[1] == args[3]);
+ tcg_debug_assert((args[1] & 1) == 0);
+ tcg_debug_assert(args[0] == args[1] + 1);
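+ /* DSGR reads its 64-bit dividend from the odd register only; the even
+ half (the sign-extension) is ignored on input. */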
+ tcg_out_insn(s, RRE, DSGR, args[1], args[4]);
break;
case INDEX_op_divu2_i64:
- tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
+ tcg_debug_assert(args[0] == args[2]);
+ tcg_debug_assert(args[1] == args[3]);
+ tcg_debug_assert((args[1] & 1) == 0);
+ tcg_debug_assert(args[0] == args[1] + 1);
+ tcg_out_insn(s, RRE, DLGR, args[1], args[4]);
break;
case INDEX_op_mulu2_i64:
- tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
+ tcg_debug_assert(args[0] == args[2]);
+ tcg_debug_assert((args[1] & 1) == 0);
+ tcg_debug_assert(args[0] == args[1] + 1);
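+ /* MLGR multiplies the odd register of the pair by args[3], leaving the
+ 128-bit product in the even:odd pair as high:low. */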
+ tcg_out_insn(s, RRE, MLGR, args[1], args[3]);
break;
case INDEX_op_shl_i64:
/* The host memory model is quite strong; we simply need to
serialize the instruction stream. */
if (args[0] & TCG_MO_ST_LD) {
- tcg_out_insn(s, RR, BCR, HAVE_FACILITY(FAST_BCR_SER) ? 14 : 15, 0);
+ /* fast-bcr-serialization facility (45) is present */
+ tcg_out_insn(s, RR, BCR, 14, 0);
}
break;
if (vece == MO_64) {
return true;
}
+ src = dst;
}
/*
msb = clz32(val);
lsb = 31 - ctz32(val);
}
- tcg_out_insn(s, VRIb, VGM, dst, lsb, msb, MO_32);
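+ /* VGM's immediates are (start, end) in IBM bit order (bit 0 is the
+ MSB), so the msb must be passed first. */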
+ tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_32);
return;
}
} else {
msb = clz64(val);
lsb = 63 - ctz64(val);
}
- tcg_out_insn(s, VRIb, VGM, dst, lsb, msb, MO_64);
+ tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_64);
return;
}
}
break;
case INDEX_op_bitsel_vec:
- tcg_out_insn(s, VRRe, VSEL, a0, a1, a2, args[3]);
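+ /* VSEL v1,v2,v3,v4 computes v1 = (v2 & v4) | (v3 & ~v4), so
+ bitsel's selector a1 goes last. */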
+ tcg_out_insn(s, VRRe, VSEL, a0, a2, args[3], a1);
break;
case INDEX_op_cmp_vec:
case INDEX_op_or_i64:
case INDEX_op_xor_i32:
case INDEX_op_xor_i64:
- return (HAVE_FACILITY(DISTINCT_OPS)
- ? C_O1_I2(r, r, ri)
- : C_O1_I2(r, 0, ri));
+ return C_O1_I2(r, r, ri);
case INDEX_op_mul_i32:
- /* If we have the general-instruction-extensions, then we have
- MULTIPLY SINGLE IMMEDIATE with a signed 32-bit, otherwise we
- have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
- return (HAVE_FACILITY(GEN_INST_EXT)
- ? C_O1_I2(r, 0, ri)
- : C_O1_I2(r, 0, rI));
-
+ return C_O1_I2(r, 0, ri);
case INDEX_op_mul_i64:
- return (HAVE_FACILITY(GEN_INST_EXT)
- ? C_O1_I2(r, 0, rJ)
- : C_O1_I2(r, 0, rI));
+ return C_O1_I2(r, 0, rJ);
case INDEX_op_shl_i32:
case INDEX_op_shr_i32:
case INDEX_op_sar_i32:
- return (HAVE_FACILITY(DISTINCT_OPS)
- ? C_O1_I2(r, r, ri)
- : C_O1_I2(r, 0, ri));
+ return C_O1_I2(r, r, ri);
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
case INDEX_op_div2_i64:
case INDEX_op_divu2_i32:
case INDEX_op_divu2_i64:
- return C_O2_I3(b, a, 0, 1, r);
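+ /* 'o' restricts the first output to an odd register; 'm' allocates the
+ second output to the register just below it, forming the even/odd pair. */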
+ return C_O2_I3(o, m, 0, 1, r);
case INDEX_op_mulu2_i64:
- return C_O2_I2(b, a, 0, r);
+ return C_O2_I2(o, m, 0, r);
case INDEX_op_add2_i32:
case INDEX_op_sub2_i32:
- return (HAVE_FACILITY(EXT_IMM)
- ? C_O2_I4(r, r, 0, 1, ri, r)
- : C_O2_I4(r, r, 0, 1, r, r));
+ return C_O2_I4(r, r, 0, 1, ri, r);
case INDEX_op_add2_i64:
case INDEX_op_sub2_i64:
- return (HAVE_FACILITY(EXT_IMM)
- ? C_O2_I4(r, r, 0, 1, rA, r)
- : C_O2_I4(r, r, 0, 1, r, r));
+ return C_O2_I4(r, r, 0, 1, rA, r);
case INDEX_op_st_vec:
return C_O0_I2(v, r);
static void query_s390_facilities(void)
{
unsigned long hwcap = qemu_getauxval(AT_HWCAP);
+ const char *which;
/* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this
is present on all 64-bit systems, but let's check for it anyway. */
if (!(hwcap & HWCAP_S390_VXRS)) {
s390_facilities[2] = 0;
}
+
+ /*
+ * Minimum supported cpu revision is z196.
+ * Check for all required facilities.
+ * ZARCH_ACTIVE is done via preprocessor check for 64-bit.
+ */
+ if (!HAVE_FACILITY(LONG_DISP)) {
+ which = "long-displacement";
+ goto fail;
+ }
+ if (!HAVE_FACILITY(EXT_IMM)) {
+ which = "extended-immediate";
+ goto fail;
+ }
+ if (!HAVE_FACILITY(GEN_INST_EXT)) {
+ which = "general-instructions-extension";
+ goto fail;
+ }
+ /*
+ * Facility 45 is a big bin that contains: distinct-operands,
+ * fast-BCR-serialization, high-word, population-count,
+ * interlocked-access-1, and load/store-on-condition-1
+ */
+ if (!HAVE_FACILITY(45)) {
+ which = "45";
+ goto fail;
+ }
+ return;
+
+ fail:
+ error_report("%s: missing required facility %s", __func__, which);
+ exit(EXIT_FAILURE);
}
static void tcg_target_init(TCGContext *s)
/* XXX many insns can't be used with R0, so we better avoid it for now */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
- if (USE_REG_TB) {
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
- }
}
#define FRAME_SIZE ((int)(TCG_TARGET_CALL_STACK_OFFSET \
#ifndef CONFIG_SOFTMMU
if (guest_base >= 0x80000) {
- tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
}
#endif
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
- if (USE_REG_TB) {
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB,
- tcg_target_call_iarg_regs[1]);
- }
/* br %r3 (go to TB) */
tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);