static uint8_t *tb_ret_addr;
-#ifdef _CALL_DARWIN
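+/* Some Apple toolchains do not predefine _CALL_DARWIN, so __APPLE__ is
+   accepted as an equivalent indication of the Darwin calling convention. */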
+#if defined _CALL_DARWIN || defined __APPLE__
+#define TCG_TARGET_CALL_DARWIN
+#endif
+
+#ifdef TCG_TARGET_CALL_DARWIN
#define LINKAGE_AREA_SIZE 24
#define LR_OFFSET 8
#elif defined _CALL_AIX
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
"r0",
"r1",
- "rp",
+ "r2",
"r3",
"r4",
"r5",
TCG_REG_R29,
TCG_REG_R30,
TCG_REG_R31,
-#ifdef _CALL_DARWIN
+#ifdef TCG_TARGET_CALL_DARWIN
TCG_REG_R2,
#endif
TCG_REG_R3,
TCG_REG_R8,
TCG_REG_R9,
TCG_REG_R10,
-#ifndef _CALL_DARWIN
+#ifndef TCG_TARGET_CALL_DARWIN
TCG_REG_R11,
#endif
TCG_REG_R12,
};
static const int tcg_target_callee_save_regs[] = {
-#ifdef _CALL_DARWIN
+#ifdef TCG_TARGET_CALL_DARWIN
TCG_REG_R11,
TCG_REG_R13,
#endif
TCG_REG_R24,
TCG_REG_R25,
TCG_REG_R26,
- /* TCG_REG_R27, */ /* currently used for the global env, so no
- need to save */
+ TCG_REG_R27, /* currently used for the global env */
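+                     /* Since the prologue now loads the env pointer into
+                        this register itself, it has to be saved and restored
+                        like the other callee-saved registers. */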
TCG_REG_R28,
TCG_REG_R29,
TCG_REG_R30,
tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
+#if TARGET_LONG_BITS == 64
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
+#ifdef TCG_TARGET_CALL_ALIGN_ARGS
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R7);
+#endif
+#endif
break;
case 'K': /* qemu_st[8..32] constraint */
ct->ct |= TCG_CT_REG;
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
-#if TARGET_LONG_BITS == 64
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
+#if TARGET_LONG_BITS == 64
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R7);
+#ifdef TCG_TARGET_CALL_ALIGN_ARGS
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R8);
+#endif
#endif
break;
case 'M': /* qemu_st64 constraint */
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R7);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R8);
+#ifdef TCG_TARGET_CALL_ALIGN_ARGS
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R9);
+#endif
break;
#else
case 'L':
[TCG_COND_GTU] = BC | BI (7, CR_GT) | BO_COND_TRUE,
};
-static void tcg_out_mov(TCGContext *s, int ret, int arg)
+static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
tcg_out32 (s, OR | SAB (arg, ret, arg));
}
static void tcg_out_movi(TCGContext *s, TCGType type,
- int ret, tcg_target_long arg)
+ TCGReg ret, tcg_target_long arg)
{
if (arg == (int16_t) arg)
tcg_out32 (s, ADDI | RT (ret) | RA (0) | (arg & 0xffff));
#include "../../softmmu_defs.h"
-static void *qemu_ld_helpers[4] = {
- __ldb_mmu,
- __ldw_mmu,
- __ldl_mmu,
- __ldq_mmu,
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+ int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+ helper_ldb_mmu,
+ helper_ldw_mmu,
+ helper_ldl_mmu,
+ helper_ldq_mmu,
};
-static void *qemu_st_helpers[4] = {
- __stb_mmu,
- __stw_mmu,
- __stl_mmu,
- __stq_mmu,
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+ uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+ helper_stb_mmu,
+ helper_stw_mmu,
+ helper_stl_mmu,
+ helper_stq_mmu,
};
#endif
static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
{
- int addr_reg, data_reg, data_reg2, r0, r1, rbase, mem_index, s_bits, bswap;
+ int addr_reg, data_reg, data_reg2, r0, r1, rbase, bswap;
#ifdef CONFIG_SOFTMMU
- int r2;
+ int mem_index, s_bits, r2, ir;
void *label1_ptr, *label2_ptr;
-#endif
#if TARGET_LONG_BITS == 64
int addr_reg2;
+#endif
#endif
data_reg = *args++;
else
data_reg2 = 0;
addr_reg = *args++;
+
+#ifdef CONFIG_SOFTMMU
#if TARGET_LONG_BITS == 64
addr_reg2 = *args++;
#endif
mem_index = *args;
s_bits = opc & 3;
-
-#ifdef CONFIG_SOFTMMU
r0 = 3;
r1 = 4;
r2 = 0;
tcg_out32 (s, (LWZU
| RT (r1)
| RA (r0)
- | offsetof (CPUState, tlb_table[mem_index][0].addr_read)
+ | offsetof (CPUArchState, tlb_table[mem_index][0].addr_read)
)
);
tcg_out32 (s, (RLWINM
#endif
/* slow path */
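+    /* call argument layout for the load helper: env first, then the guest
+       address (a 64-bit guest address is advanced to an odd register when
+       TCG_TARGET_CALL_ALIGN_ARGS requires aligned pairs), then mmu_idx */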
+ ir = 3;
+ tcg_out_mov (s, TCG_TYPE_I32, ir++, TCG_AREG0);
#if TARGET_LONG_BITS == 32
- tcg_out_mov (s, 3, addr_reg);
- tcg_out_movi (s, TCG_TYPE_I32, 4, mem_index);
+ tcg_out_mov (s, TCG_TYPE_I32, ir++, addr_reg);
#else
- tcg_out_mov (s, 3, addr_reg2);
- tcg_out_mov (s, 4, addr_reg);
- tcg_out_movi (s, TCG_TYPE_I32, 5, mem_index);
+#ifdef TCG_TARGET_CALL_ALIGN_ARGS
+    ir |= 1;  /* advance to an odd register: 64-bit args use aligned pairs */
+#endif
+ tcg_out_mov (s, TCG_TYPE_I32, ir++, addr_reg2);
+ tcg_out_mov (s, TCG_TYPE_I32, ir++, addr_reg);
#endif
+ tcg_out_movi (s, TCG_TYPE_I32, ir, mem_index);
tcg_out_call (s, (tcg_target_long) qemu_ld_helpers[s_bits], 1);
switch (opc) {
case 1:
case 2:
if (data_reg != 3)
- tcg_out_mov (s, data_reg, 3);
+ tcg_out_mov (s, TCG_TYPE_I32, data_reg, 3);
break;
case 3:
if (data_reg == 3) {
if (data_reg2 == 4) {
- tcg_out_mov (s, 0, 4);
- tcg_out_mov (s, 4, 3);
- tcg_out_mov (s, 3, 0);
+ tcg_out_mov (s, TCG_TYPE_I32, 0, 4);
+ tcg_out_mov (s, TCG_TYPE_I32, 4, 3);
+ tcg_out_mov (s, TCG_TYPE_I32, 3, 0);
}
else {
- tcg_out_mov (s, data_reg2, 3);
- tcg_out_mov (s, 3, 4);
+ tcg_out_mov (s, TCG_TYPE_I32, data_reg2, 3);
+ tcg_out_mov (s, TCG_TYPE_I32, 3, 4);
}
}
else {
- if (data_reg != 4) tcg_out_mov (s, data_reg, 4);
- if (data_reg2 != 3) tcg_out_mov (s, data_reg2, 3);
+ if (data_reg != 4) tcg_out_mov (s, TCG_TYPE_I32, data_reg, 4);
+ if (data_reg2 != 3) tcg_out_mov (s, TCG_TYPE_I32, data_reg2, 3);
}
break;
}
if (r0 == data_reg2) {
tcg_out32 (s, LWZ | RT (0) | RA (r0));
tcg_out32 (s, LWZ | RT (data_reg) | RA (r0) | 4);
- tcg_out_mov (s, data_reg2, 0);
+ tcg_out_mov (s, TCG_TYPE_I32, data_reg2, 0);
}
else {
tcg_out32 (s, LWZ | RT (data_reg2) | RA (r0));
static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
{
- int addr_reg, r0, r1, data_reg, data_reg2, mem_index, bswap, rbase;
+ int addr_reg, r0, r1, data_reg, data_reg2, bswap, rbase;
#ifdef CONFIG_SOFTMMU
- int r2, ir;
+ int mem_index, r2, ir;
void *label1_ptr, *label2_ptr;
-#endif
#if TARGET_LONG_BITS == 64
int addr_reg2;
+#endif
#endif
data_reg = *args++;
else
data_reg2 = 0;
addr_reg = *args++;
+
+#ifdef CONFIG_SOFTMMU
#if TARGET_LONG_BITS == 64
addr_reg2 = *args++;
#endif
mem_index = *args;
-
-#ifdef CONFIG_SOFTMMU
r0 = 3;
r1 = 4;
r2 = 0;
tcg_out32 (s, (LWZU
| RT (r1)
| RA (r0)
- | offsetof (CPUState, tlb_table[mem_index][0].addr_write)
+ | offsetof (CPUArchState, tlb_table[mem_index][0].addr_write)
)
);
tcg_out32 (s, (RLWINM
#endif
/* slow path */
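+    /* as for loads: env, then the (possibly pair-aligned) guest address;
+       the value to store and mmu_idx follow in the next argument registers */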
+ ir = 3;
+ tcg_out_mov (s, TCG_TYPE_I32, ir++, TCG_AREG0);
#if TARGET_LONG_BITS == 32
- tcg_out_mov (s, 3, addr_reg);
- ir = 4;
+ tcg_out_mov (s, TCG_TYPE_I32, ir++, addr_reg);
#else
- tcg_out_mov (s, 3, addr_reg2);
- tcg_out_mov (s, 4, addr_reg);
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
- ir = 5;
-#else
- ir = 4;
+ ir |= 1;
#endif
+ tcg_out_mov (s, TCG_TYPE_I32, ir++, addr_reg2);
+ tcg_out_mov (s, TCG_TYPE_I32, ir++, addr_reg);
#endif
switch (opc) {
| ME (31)));
break;
case 2:
- tcg_out_mov (s, ir, data_reg);
+ tcg_out_mov (s, TCG_TYPE_I32, ir, data_reg);
break;
case 3:
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
- ir = 5;
+ ir |= 1;
#endif
- tcg_out_mov (s, ir++, data_reg2);
- tcg_out_mov (s, ir, data_reg);
+ tcg_out_mov (s, TCG_TYPE_I32, ir++, data_reg2);
+ tcg_out_mov (s, TCG_TYPE_I32, ir, data_reg);
break;
}
ir++;
#endif
}
-void tcg_target_qemu_prologue (TCGContext *s)
+static void tcg_target_qemu_prologue (TCGContext *s)
{
int i, frame_size;
+ LINKAGE_AREA_SIZE
+ TCG_STATIC_CALL_ARGS_SIZE
+ ARRAY_SIZE (tcg_target_callee_save_regs) * 4
+ + CPU_TEMP_BUF_NLONGS * sizeof(long)
;
frame_size = (frame_size + 15) & ~15;
+ tcg_set_frame(s, TCG_REG_CALL_STACK, frame_size
+ - CPU_TEMP_BUF_NLONGS * sizeof(long),
+ CPU_TEMP_BUF_NLONGS * sizeof(long));
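+    /* the area set aside above, at the top of the frame, is where TCG may
+       spill temporaries that do not fit in registers */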
+
#ifdef _CALL_AIX
{
uint32_t addr;
tcg_out32 (s, STW | RS (0) | RA (1) | (frame_size + LR_OFFSET));
#ifdef CONFIG_USE_GUEST_BASE
- tcg_out_movi (s, TCG_TYPE_I32, TCG_GUEST_BASE_REG, GUEST_BASE);
+ if (GUEST_BASE) {
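+        /* only dedicate a register to guest_base when it is non-zero;
+           otherwise the register stays available to the allocator */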
+ tcg_out_movi (s, TCG_TYPE_I32, TCG_GUEST_BASE_REG, GUEST_BASE);
+ tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
+ }
#endif
- tcg_out32 (s, MTSPR | RS (3) | CTR);
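+    /* the prologue now receives the env pointer and the TB address as its
+       two call arguments: keep env in TCG_AREG0 and enter the TB via CTR */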
+ tcg_out_mov (s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
+ tcg_out32 (s, MTSPR | RS (tcg_target_call_iarg_regs[1]) | CTR);
tcg_out32 (s, BCCTR | BO_ALWAYS);
tb_ret_addr = s->code_ptr;
tcg_out32 (s, BCLR | BO_ALWAYS);
}
-static void tcg_out_ld (TCGContext *s, TCGType type, int ret, int arg1,
+static void tcg_out_ld (TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
tcg_target_long arg2)
{
tcg_out_ldst (s, ret, arg1, arg2, LWZ, LWZX);
}
-static void tcg_out_st (TCGContext *s, TCGType type, int arg, int arg1,
+static void tcg_out_st (TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
tcg_target_long arg2)
{
tcg_out_ldst (s, arg, arg1, arg2, STW, STWX);
}
}
-static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
-{
- ppc_addi (s, reg, reg, val);
-}
-
static void tcg_out_cmp (TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
int const_arg2, int cr)
{
case TCG_COND_GEU:
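+        /* compare the high parts into cr5 and the low parts, always
+           unsigned, into cr7; the combined result is "high-part condition,
+           or high parts equal and low-part condition" */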
op = (b->bit1 != b->bit2) ? CRANDC : CRAND;
tcg_out_cmp (s, b->cond2, args[1], args[3], const_args[3], 5);
- tcg_out_cmp (s, TCG_COND_EQ, args[1], args[3], const_args[3], 6);
- tcg_out_cmp (s, cond, args[0], args[2], const_args[2], 7);
- tcg_out32 (s, op | BT (7, CR_EQ) | BA (6, CR_EQ) | BB (7, b->bit2));
+ tcg_out_cmp (s, tcg_unsigned_cond (cond), args[0], args[2],
+ const_args[2], 7);
+ tcg_out32 (s, op | BT (7, CR_EQ) | BA (5, CR_EQ) | BB (7, b->bit2));
tcg_out32 (s, CROR | BT (7, CR_EQ) | BA (5, b->bit1) | BB (7, CR_EQ));
break;
default:
if (args[0] == args[2] || args[0] == args[3]) {
tcg_out32 (s, MULLW | TAB (0, args[2], args[3]));
tcg_out32 (s, MULHWU | TAB (args[1], args[2], args[3]));
- tcg_out_mov (s, args[0], 0);
+ tcg_out_mov (s, TCG_TYPE_I32, args[0], 0);
}
else {
tcg_out32 (s, MULLW | TAB (args[0], args[2], args[3]));
case INDEX_op_rotr_i32:
if (const_args[2]) {
if (!args[2]) {
- tcg_out_mov (s, args[0], args[1]);
+ tcg_out_mov (s, TCG_TYPE_I32, args[0], args[1]);
}
else {
tcg_out32 (s, RLWINM
if (args[0] == args[3] || args[0] == args[5]) {
tcg_out32 (s, ADDC | TAB (0, args[2], args[4]));
tcg_out32 (s, ADDE | TAB (args[1], args[3], args[5]));
- tcg_out_mov (s, args[0], 0);
+ tcg_out_mov (s, TCG_TYPE_I32, args[0], 0);
}
else {
tcg_out32 (s, ADDC | TAB (args[0], args[2], args[4]));
if (args[0] == args[3] || args[0] == args[5]) {
tcg_out32 (s, SUBFC | TAB (0, args[4], args[2]));
tcg_out32 (s, SUBFE | TAB (args[1], args[5], args[3]));
- tcg_out_mov (s, args[0], 0);
+ tcg_out_mov (s, TCG_TYPE_I32, args[0], 0);
}
else {
tcg_out32 (s, SUBFC | TAB (args[0], args[4], args[2]));
);
if (!a0) {
- tcg_out_mov (s, args[0], a0);
+ tcg_out_mov (s, TCG_TYPE_I32, args[0], a0);
}
}
break;
+ case INDEX_op_deposit_i32:
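+        /* rlwimi: rotate the value left by the deposit offset and insert it
+           into the destination under a field of args[4] bits; MB/ME count
+           from the most significant bit, hence the 32/31 - x terms */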
+ tcg_out32 (s, RLWIMI
+ | RA (args[0])
+ | RS (args[2])
+ | SH (args[3])
+ | MB (32 - args[3] - args[4])
+ | ME (31 - args[3])
+ );
+ break;
+
default:
- tcg_dump_ops (s, stderr);
+ tcg_dump_ops (s);
tcg_abort ();
}
}
{ INDEX_op_ext16s_i32, { "r", "r" } },
{ INDEX_op_ext16u_i32, { "r", "r" } },
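+    /* the "0" constraint ties the first input to the output register,
+       because rlwimi performs the insertion in place */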
+ { INDEX_op_deposit_i32, { "r", "0", "r" } },
+
{ -1 },
};
-void tcg_target_init(TCGContext *s)
+static void tcg_target_init(TCGContext *s)
{
tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
tcg_regset_set32(tcg_target_call_clobber_regs, 0,
(1 << TCG_REG_R0) |
-#ifdef _CALL_DARWIN
+#ifdef TCG_TARGET_CALL_DARWIN
(1 << TCG_REG_R2) |
#endif
(1 << TCG_REG_R3) |
tcg_regset_clear(s->reserved_regs);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);
-#ifndef _CALL_DARWIN
+#ifndef TCG_TARGET_CALL_DARWIN
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2);
#endif
#ifdef _CALL_SYSV
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13);
#endif
-#ifdef CONFIG_USE_GUEST_BASE
- tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
-#endif
tcg_add_target_add_op_defs(ppc_op_defs);
}