#else
/* To assert at link-time that these values are never used
   for TCG_TARGET_REG_BITS == 64. */
-/* extern */ int link_error(void);
+int link_error(void);
# define LO_OFF link_error()
# define HI_OFF link_error()
#endif
*pc = deposit32(*pc, 0, 26, reloc_26_val(pc, target));
}
-static void patch_reloc(tcg_insn_unit *code_ptr, int type,
+static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
intptr_t value, intptr_t addend)
{
tcg_debug_assert(type == R_MIPS_PC16);
tcg_debug_assert(addend == 0);
reloc_pc16(code_ptr, (tcg_insn_unit *)value);
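+    /* reloc_pc16 asserts that the 16-bit displacement is in range,
+       so the relocation can always be reported as successful. */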
+ return true;
}
#define TCG_CT_CONST_ZERO 0x100
switch(*ct_str++) {
case 'r':
ct->ct |= TCG_CT_REG;
- tcg_regset_set(ct->u.regs, 0xffffffff);
+ ct->u.regs = 0xffffffff;
break;
case 'L': /* qemu_ld input arg constraint */
ct->ct |= TCG_CT_REG;
- tcg_regset_set(ct->u.regs, 0xffffffff);
+ ct->u.regs = 0xffffffff;
tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
break;
case 'S': /* qemu_st constraint */
ct->ct |= TCG_CT_REG;
- tcg_regset_set(ct->u.regs, 0xffffffff);
+ ct->u.regs = 0xffffffff;
tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
static inline void tcg_out_opc_br(TCGContext *s, MIPSInsn opc,
TCGReg rt, TCGReg rs)
{
- /* We pay attention here to not modify the branch target by reading
- the existing value and using it again. This ensure that caches and
- memory are kept coherent during retranslation. */
- uint16_t offset = (uint16_t)*s->code_ptr;
-
- tcg_out_opc_imm(s, opc, rt, rs, offset);
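+    /* Emit the branch with a zero displacement; reloc_pc16 fills it in
+       once the target is known. */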
+ tcg_out_opc_imm(s, opc, rt, rs, 0);
}
/*
tcg_out_opc_sa64(s, OPC_DSRA, OPC_DSRA32, rd, rt, sa);
}
-static inline void tcg_out_mov(TCGContext *s, TCGType type,
+static inline bool tcg_out_mov(TCGContext *s, TCGType type,
TCGReg ret, TCGReg arg)
{
/* Simple reg-reg move, optimising out the 'do nothing' case */
if (ret != arg) {
tcg_out_opc_reg(s, OPC_OR, ret, arg, TCG_REG_ZERO);
}
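+    /* A MIPS reg-reg move cannot fail; the result tells the caller
+       that the move was emitted. */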
+ return true;
}
static void tcg_out_movi(TCGContext *s, TCGType type,
tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl);
tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl);
} else if (rl == al && rl == bl) {
- tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, 31);
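+        /* rl == al == bl: the addition doubles al, so the carry out is
+           al's most-significant bit; capture it before the add clobbers
+           the inputs. */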
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, TCG_TARGET_REG_BITS - 1);
tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
} else {
tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
return i;
}
-/* Perform the tlb comparison operation. The complete host address is
- placed in BASE. Clobbers TMP0, TMP1, TMP2, A0. */
+/* We expect to use a 16-bit negative offset from ENV: MIPS load/store
+   instructions take a signed 16-bit displacement. */
+QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
+QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
+
+/*
+ * Perform the tlb comparison operation.
+ * The complete host address is placed in BASE.
+ * Clobbers TMP0, TMP1, TMP2, TMP3.
+ */
static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
TCGReg addrh, TCGMemOpIdx oi,
tcg_insn_unit *label_ptr[2], bool is_load)
TCGMemOp opc = get_memop(oi);
unsigned s_bits = opc & MO_SIZE;
unsigned a_bits = get_alignment_bits(opc);
- target_ulong mask;
int mem_index = get_mmuidx(oi);
- int cmp_off
- = (is_load
- ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
- : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
- int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
+ int fast_off = TLB_MASK_TABLE_OFS(mem_index);
+ int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
+ int table_off = fast_off + offsetof(CPUTLBDescFast, table);
+ int add_off = offsetof(CPUTLBEntry, addend);
+ int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
+ : offsetof(CPUTLBEntry, addr_write));
+ target_ulong mask;
+
+ /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP1, TCG_AREG0, table_off);
- tcg_out_opc_sa(s, ALIAS_TSRL, TCG_REG_A0, addrl,
+ /* Extract the TLB index from the address into TMP3. */
+ tcg_out_opc_sa(s, ALIAS_TSRL, TCG_TMP3, addrl,
TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
- tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0,
- (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
- tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, TCG_REG_A0, TCG_AREG0);
-
- /* Compensate for very large offsets. */
- if (add_off >= 0x8000) {
- /* Most target env are smaller than 32k; none are larger than 64k.
- Simplify the logic here merely to offset by 0x7ff0, giving us a
- range just shy of 64k. Check this assumption. */
- QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
- tlb_table[NB_MMU_MODES - 1][1])
- > 0x7ff0 + 0x7fff);
- tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, TCG_REG_A0, 0x7ff0);
- cmp_off -= 0x7ff0;
- add_off -= 0x7ff0;
- }
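+    /* TMP0 holds tlb_mask[mmu_idx]: the AND both bounds the index and
+       keeps it scaled to the size of a CPUTLBEntry. */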
+ tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0);
+
+ /* Add the tlb_table pointer, creating the CPUTLBEntry address in TMP3. */
+ tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1);
/* We don't currently support unaligned accesses.
We could do so with mips32r6. */
a_bits = s_bits;
}
+ /* Mask the page bits, keeping the alignment bits to compare against. */
mask = (target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
- /* Load the (low half) tlb comparator. Mask the page bits, keeping the
- alignment bits to compare against. */
+ /* Load the (low-half) tlb comparator. */
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
- tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A0, cmp_off + LO_OFF);
+ tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF);
tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, mask);
} else {
- tcg_out_ldst(s,
- (TARGET_LONG_BITS == 64 ? OPC_LD
- : TCG_TARGET_REG_BITS == 64 ? OPC_LWU : OPC_LW),
- TCG_TMP0, TCG_REG_A0, cmp_off);
+ tcg_out_ldst(s, (TARGET_LONG_BITS == 64 ? OPC_LD
+ : TCG_TARGET_REG_BITS == 64 ? OPC_LWU : OPC_LW),
+ TCG_TMP0, TCG_TMP3, cmp_off);
tcg_out_movi(s, TCG_TYPE_TL, TCG_TMP1, mask);
/* No second compare is required here;
load the tlb addend for the fast path. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_REG_A0, add_off);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off);
}
tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl);
/* Load and test the high half tlb comparator. */
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
/* delay slot */
- tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A0, cmp_off + HI_OFF);
+ tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF);
/* Load the tlb addend for the fast path. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_REG_A0, add_off);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off);
label_ptr[1] = s->code_ptr;
tcg_out_opc_br(s, OPC_BNE, addrh, TCG_TMP0);
}
}
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
TCGMemOpIdx oi = l->oi;
TCGMemOp opc = get_memop(oi);
}
}
- reloc_pc16(s->code_ptr, l->raddr);
tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO);
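+    /* The branch was just emitted; relocate it at code_ptr - 1, not at
+       the current (post-emission) code_ptr. */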
+ reloc_pc16(s->code_ptr - 1, l->raddr);
+
/* delay slot */
if (TCG_TARGET_REG_BITS == 64 && l->type == TCG_TYPE_I32) {
/* we always sign-extend 32-bit loads */
} else {
tcg_out_opc_reg(s, OPC_OR, v0, TCG_REG_V0, TCG_REG_ZERO);
}
+ return true;
}
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
TCGMemOpIdx oi = l->oi;
TCGMemOp opc = get_memop(oi);
tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)], true);
/* delay slot */
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
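+    /* The store helper was invoked as a tail call, so it returns
+       directly to the fast path. */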
+ return true;
}
#endif
tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
}
tcg_out_nop(s);
- s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
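+        /* set_jmp_reset_offset records the current code size and asserts
+           that it fits in the tb_jmp_reset_offset field. */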
+ set_jmp_reset_offset(s, a0);
break;
case INDEX_op_goto_ptr:
/* jmp to the given host address (could be epilogue) */
}
}
-static const TCGTargetOpDef mips_op_defs[] = {
- { INDEX_op_exit_tb, { } },
- { INDEX_op_goto_tb, { } },
- { INDEX_op_br, { } },
- { INDEX_op_goto_ptr, { "r" } },
+static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
+{
+ static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
+ static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
+ static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
+ static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } };
+ static const TCGTargetOpDef SZ_S = { .args_ct_str = { "SZ", "S" } };
+ static const TCGTargetOpDef rZ_rZ = { .args_ct_str = { "rZ", "rZ" } };
+ static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } };
+ static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
+ static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
+ static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } };
+ static const TCGTargetOpDef r_r_rJ = { .args_ct_str = { "r", "r", "rJ" } };
+ static const TCGTargetOpDef SZ_S_S = { .args_ct_str = { "SZ", "S", "S" } };
+ static const TCGTargetOpDef SZ_SZ_S
+ = { .args_ct_str = { "SZ", "SZ", "S" } };
+ static const TCGTargetOpDef SZ_SZ_S_S
+ = { .args_ct_str = { "SZ", "SZ", "S", "S" } };
+ static const TCGTargetOpDef r_rZ_rN
+ = { .args_ct_str = { "r", "rZ", "rN" } };
+ static const TCGTargetOpDef r_rZ_rZ
+ = { .args_ct_str = { "r", "rZ", "rZ" } };
+ static const TCGTargetOpDef r_r_rIK
+ = { .args_ct_str = { "r", "r", "rIK" } };
+ static const TCGTargetOpDef r_r_rWZ
+ = { .args_ct_str = { "r", "r", "rWZ" } };
+ static const TCGTargetOpDef r_r_r_r
+ = { .args_ct_str = { "r", "r", "r", "r" } };
+ static const TCGTargetOpDef r_r_L_L
+ = { .args_ct_str = { "r", "r", "L", "L" } };
+ static const TCGTargetOpDef dep
+ = { .args_ct_str = { "r", "0", "rZ" } };
+ static const TCGTargetOpDef movc
+ = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "0" } };
+ static const TCGTargetOpDef movc_r6
+ = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } };
+ static const TCGTargetOpDef add2
+ = { .args_ct_str = { "r", "r", "rZ", "rZ", "rN", "rN" } };
+ static const TCGTargetOpDef br2
+ = { .args_ct_str = { "rZ", "rZ", "rZ", "rZ" } };
+ static const TCGTargetOpDef setc2
+ = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } };
+
+ switch (op) {
+ case INDEX_op_goto_ptr:
+ return &r;
- { INDEX_op_ld8u_i32, { "r", "r" } },
- { INDEX_op_ld8s_i32, { "r", "r" } },
- { INDEX_op_ld16u_i32, { "r", "r" } },
- { INDEX_op_ld16s_i32, { "r", "r" } },
- { INDEX_op_ld_i32, { "r", "r" } },
- { INDEX_op_st8_i32, { "rZ", "r" } },
- { INDEX_op_st16_i32, { "rZ", "r" } },
- { INDEX_op_st_i32, { "rZ", "r" } },
+ case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8s_i32:
+ case INDEX_op_ld16u_i32:
+ case INDEX_op_ld16s_i32:
+ case INDEX_op_ld_i32:
+ case INDEX_op_not_i32:
+ case INDEX_op_bswap16_i32:
+ case INDEX_op_bswap32_i32:
+ case INDEX_op_ext8s_i32:
+ case INDEX_op_ext16s_i32:
+ case INDEX_op_extract_i32:
+ case INDEX_op_ld8u_i64:
+ case INDEX_op_ld8s_i64:
+ case INDEX_op_ld16u_i64:
+ case INDEX_op_ld16s_i64:
+ case INDEX_op_ld32s_i64:
+ case INDEX_op_ld32u_i64:
+ case INDEX_op_ld_i64:
+ case INDEX_op_not_i64:
+ case INDEX_op_bswap16_i64:
+ case INDEX_op_bswap32_i64:
+ case INDEX_op_bswap64_i64:
+ case INDEX_op_ext8s_i64:
+ case INDEX_op_ext16s_i64:
+ case INDEX_op_ext32s_i64:
+ case INDEX_op_ext32u_i64:
+ case INDEX_op_ext_i32_i64:
+ case INDEX_op_extu_i32_i64:
+ case INDEX_op_extrl_i64_i32:
+ case INDEX_op_extrh_i64_i32:
+ case INDEX_op_extract_i64:
+ return &r_r;
- { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
- { INDEX_op_mul_i32, { "r", "rZ", "rZ" } },
-#if !use_mips32r6_instructions
- { INDEX_op_muls2_i32, { "r", "r", "rZ", "rZ" } },
- { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rZ" } },
-#endif
- { INDEX_op_mulsh_i32, { "r", "rZ", "rZ" } },
- { INDEX_op_muluh_i32, { "r", "rZ", "rZ" } },
- { INDEX_op_div_i32, { "r", "rZ", "rZ" } },
- { INDEX_op_divu_i32, { "r", "rZ", "rZ" } },
- { INDEX_op_rem_i32, { "r", "rZ", "rZ" } },
- { INDEX_op_remu_i32, { "r", "rZ", "rZ" } },
- { INDEX_op_sub_i32, { "r", "rZ", "rN" } },
-
- { INDEX_op_and_i32, { "r", "rZ", "rIK" } },
- { INDEX_op_nor_i32, { "r", "rZ", "rZ" } },
- { INDEX_op_not_i32, { "r", "rZ" } },
- { INDEX_op_or_i32, { "r", "rZ", "rIZ" } },
- { INDEX_op_xor_i32, { "r", "rZ", "rIZ" } },
-
- { INDEX_op_shl_i32, { "r", "rZ", "ri" } },
- { INDEX_op_shr_i32, { "r", "rZ", "ri" } },
- { INDEX_op_sar_i32, { "r", "rZ", "ri" } },
- { INDEX_op_rotr_i32, { "r", "rZ", "ri" } },
- { INDEX_op_rotl_i32, { "r", "rZ", "ri" } },
- { INDEX_op_clz_i32, { "r", "r", "rWZ" } },
-
- { INDEX_op_bswap16_i32, { "r", "r" } },
- { INDEX_op_bswap32_i32, { "r", "r" } },
-
- { INDEX_op_ext8s_i32, { "r", "rZ" } },
- { INDEX_op_ext16s_i32, { "r", "rZ" } },
-
- { INDEX_op_deposit_i32, { "r", "0", "rZ" } },
- { INDEX_op_extract_i32, { "r", "r" } },
-
- { INDEX_op_brcond_i32, { "rZ", "rZ" } },
-#if use_mips32r6_instructions
- { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },
-#else
- { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "0" } },
-#endif
- { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } },
+ case INDEX_op_st8_i32:
+ case INDEX_op_st16_i32:
+ case INDEX_op_st_i32:
+ case INDEX_op_st8_i64:
+ case INDEX_op_st16_i64:
+ case INDEX_op_st32_i64:
+ case INDEX_op_st_i64:
+ return &rZ_r;
-#if TCG_TARGET_REG_BITS == 32
- { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
- { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
- { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },
- { INDEX_op_brcond2_i32, { "rZ", "rZ", "rZ", "rZ" } },
-#endif
+ case INDEX_op_add_i32:
+ case INDEX_op_add_i64:
+ return &r_r_rJ;
+ case INDEX_op_sub_i32:
+ case INDEX_op_sub_i64:
+ return &r_rZ_rN;
+ case INDEX_op_mul_i32:
+ case INDEX_op_mulsh_i32:
+ case INDEX_op_muluh_i32:
+ case INDEX_op_div_i32:
+ case INDEX_op_divu_i32:
+ case INDEX_op_rem_i32:
+ case INDEX_op_remu_i32:
+ case INDEX_op_nor_i32:
+ case INDEX_op_setcond_i32:
+ case INDEX_op_mul_i64:
+ case INDEX_op_mulsh_i64:
+ case INDEX_op_muluh_i64:
+ case INDEX_op_div_i64:
+ case INDEX_op_divu_i64:
+ case INDEX_op_rem_i64:
+ case INDEX_op_remu_i64:
+ case INDEX_op_nor_i64:
+ case INDEX_op_setcond_i64:
+ return &r_rZ_rZ;
+ case INDEX_op_muls2_i32:
+ case INDEX_op_mulu2_i32:
+ case INDEX_op_muls2_i64:
+ case INDEX_op_mulu2_i64:
+ return &r_r_r_r;
+ case INDEX_op_and_i32:
+ case INDEX_op_and_i64:
+ return &r_r_rIK;
+ case INDEX_op_or_i32:
+ case INDEX_op_xor_i32:
+ case INDEX_op_or_i64:
+ case INDEX_op_xor_i64:
+ return &r_r_rI;
+ case INDEX_op_shl_i32:
+ case INDEX_op_shr_i32:
+ case INDEX_op_sar_i32:
+ case INDEX_op_rotr_i32:
+ case INDEX_op_rotl_i32:
+ case INDEX_op_shl_i64:
+ case INDEX_op_shr_i64:
+ case INDEX_op_sar_i64:
+ case INDEX_op_rotr_i64:
+ case INDEX_op_rotl_i64:
+ return &r_r_ri;
+ case INDEX_op_clz_i32:
+ case INDEX_op_clz_i64:
+ return &r_r_rWZ;
-#if TCG_TARGET_REG_BITS == 64
- { INDEX_op_ld8u_i64, { "r", "r" } },
- { INDEX_op_ld8s_i64, { "r", "r" } },
- { INDEX_op_ld16u_i64, { "r", "r" } },
- { INDEX_op_ld16s_i64, { "r", "r" } },
- { INDEX_op_ld32s_i64, { "r", "r" } },
- { INDEX_op_ld32u_i64, { "r", "r" } },
- { INDEX_op_ld_i64, { "r", "r" } },
- { INDEX_op_st8_i64, { "rZ", "r" } },
- { INDEX_op_st16_i64, { "rZ", "r" } },
- { INDEX_op_st32_i64, { "rZ", "r" } },
- { INDEX_op_st_i64, { "rZ", "r" } },
-
- { INDEX_op_add_i64, { "r", "rZ", "rJ" } },
- { INDEX_op_mul_i64, { "r", "rZ", "rZ" } },
-#if !use_mips32r6_instructions
- { INDEX_op_muls2_i64, { "r", "r", "rZ", "rZ" } },
- { INDEX_op_mulu2_i64, { "r", "r", "rZ", "rZ" } },
-#endif
- { INDEX_op_mulsh_i64, { "r", "rZ", "rZ" } },
- { INDEX_op_muluh_i64, { "r", "rZ", "rZ" } },
- { INDEX_op_div_i64, { "r", "rZ", "rZ" } },
- { INDEX_op_divu_i64, { "r", "rZ", "rZ" } },
- { INDEX_op_rem_i64, { "r", "rZ", "rZ" } },
- { INDEX_op_remu_i64, { "r", "rZ", "rZ" } },
- { INDEX_op_sub_i64, { "r", "rZ", "rN" } },
-
- { INDEX_op_and_i64, { "r", "rZ", "rIK" } },
- { INDEX_op_nor_i64, { "r", "rZ", "rZ" } },
- { INDEX_op_not_i64, { "r", "rZ" } },
- { INDEX_op_or_i64, { "r", "rZ", "rI" } },
- { INDEX_op_xor_i64, { "r", "rZ", "rI" } },
-
- { INDEX_op_shl_i64, { "r", "rZ", "ri" } },
- { INDEX_op_shr_i64, { "r", "rZ", "ri" } },
- { INDEX_op_sar_i64, { "r", "rZ", "ri" } },
- { INDEX_op_rotr_i64, { "r", "rZ", "ri" } },
- { INDEX_op_rotl_i64, { "r", "rZ", "ri" } },
- { INDEX_op_clz_i64, { "r", "r", "rWZ" } },
-
- { INDEX_op_bswap16_i64, { "r", "r" } },
- { INDEX_op_bswap32_i64, { "r", "r" } },
- { INDEX_op_bswap64_i64, { "r", "r" } },
-
- { INDEX_op_ext8s_i64, { "r", "rZ" } },
- { INDEX_op_ext16s_i64, { "r", "rZ" } },
- { INDEX_op_ext32s_i64, { "r", "rZ" } },
- { INDEX_op_ext32u_i64, { "r", "rZ" } },
- { INDEX_op_ext_i32_i64, { "r", "rZ" } },
- { INDEX_op_extu_i32_i64, { "r", "rZ" } },
- { INDEX_op_extrl_i64_i32, { "r", "rZ" } },
- { INDEX_op_extrh_i64_i32, { "r", "rZ" } },
-
- { INDEX_op_deposit_i64, { "r", "0", "rZ" } },
- { INDEX_op_extract_i64, { "r", "r" } },
-
- { INDEX_op_brcond_i64, { "rZ", "rZ" } },
-#if use_mips32r6_instructions
- { INDEX_op_movcond_i64, { "r", "rZ", "rZ", "rZ", "rZ" } },
-#else
- { INDEX_op_movcond_i64, { "r", "rZ", "rZ", "rZ", "0" } },
-#endif
- { INDEX_op_setcond_i64, { "r", "rZ", "rZ" } },
-
- { INDEX_op_qemu_ld_i32, { "r", "LZ" } },
- { INDEX_op_qemu_st_i32, { "SZ", "SZ" } },
- { INDEX_op_qemu_ld_i64, { "r", "LZ" } },
- { INDEX_op_qemu_st_i64, { "SZ", "SZ" } },
-#elif TARGET_LONG_BITS == 32
- { INDEX_op_qemu_ld_i32, { "r", "LZ" } },
- { INDEX_op_qemu_st_i32, { "SZ", "SZ" } },
- { INDEX_op_qemu_ld_i64, { "r", "r", "LZ" } },
- { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ" } },
-#else
- { INDEX_op_qemu_ld_i32, { "r", "LZ", "LZ" } },
- { INDEX_op_qemu_st_i32, { "SZ", "SZ", "SZ" } },
- { INDEX_op_qemu_ld_i64, { "r", "r", "LZ", "LZ" } },
- { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ", "SZ" } },
-#endif
+ case INDEX_op_deposit_i32:
+ case INDEX_op_deposit_i64:
+ return &dep;
+ case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
+ return &rZ_rZ;
+ case INDEX_op_movcond_i32:
+ case INDEX_op_movcond_i64:
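+        /* Pre-r6 MOVN/MOVZ overwrite their destination, so the "else"
+           operand must alias it ("0"); r6 SELNEZ/SELEQZ do not. */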
+ return use_mips32r6_instructions ? &movc_r6 : &movc;
- { INDEX_op_mb, { } },
- { -1 },
-};
+ case INDEX_op_add2_i32:
+ case INDEX_op_sub2_i32:
+ return &add2;
+ case INDEX_op_setcond2_i32:
+ return &setc2;
+ case INDEX_op_brcond2_i32:
+ return &br2;
-static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
-{
- int i, n = ARRAY_SIZE(mips_op_defs);
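+    /* The guest address needs one host register on 64-bit hosts or for
+       32-bit guests, and a register pair otherwise. */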
+ case INDEX_op_qemu_ld_i32:
+ return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
+ ? &r_L : &r_L_L);
+ case INDEX_op_qemu_st_i32:
+ return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
+ ? &SZ_S : &SZ_S_S);
+ case INDEX_op_qemu_ld_i64:
+ return (TCG_TARGET_REG_BITS == 64 ? &r_L
+ : TARGET_LONG_BITS == 32 ? &r_r_L : &r_r_L_L);
+ case INDEX_op_qemu_st_i64:
+ return (TCG_TARGET_REG_BITS == 64 ? &SZ_S
+ : TARGET_LONG_BITS == 32 ? &SZ_SZ_S : &SZ_SZ_S_S);
- for (i = 0; i < n; ++i) {
- if (mips_op_defs[i].op == op) {
- return &mips_op_defs[i];
- }
+ default:
+ return NULL;
}
- return NULL;
}
-static int tcg_target_callee_save_regs[] = {
+static const int tcg_target_callee_save_regs[] = {
TCG_REG_S0, /* used for the global env (TCG_AREG0) */
TCG_REG_S1,
TCG_REG_S2,
static void tcg_target_init(TCGContext *s)
{
tcg_target_detect_isa();
- tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I32], 0xffffffff);
+ tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
if (TCG_TARGET_REG_BITS == 64) {
- tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I64], 0xffffffff);
- }
- tcg_regset_set(tcg_target_call_clobber_regs,
- (1 << TCG_REG_V0) |
- (1 << TCG_REG_V1) |
- (1 << TCG_REG_A0) |
- (1 << TCG_REG_A1) |
- (1 << TCG_REG_A2) |
- (1 << TCG_REG_A3) |
- (1 << TCG_REG_T0) |
- (1 << TCG_REG_T1) |
- (1 << TCG_REG_T2) |
- (1 << TCG_REG_T3) |
- (1 << TCG_REG_T4) |
- (1 << TCG_REG_T5) |
- (1 << TCG_REG_T6) |
- (1 << TCG_REG_T7) |
- (1 << TCG_REG_T8) |
- (1 << TCG_REG_T9));
-
- tcg_regset_clear(s->reserved_regs);
+ tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
+ }
+
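+    /* v0-v1, a0-a3 and t0-t9 are the caller-saved registers of the MIPS
+       calling conventions. */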
+ tcg_target_call_clobber_regs = 0;
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_A0);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_A1);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_A2);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_A3);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T0);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T1);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T2);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T3);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T4);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T5);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T6);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T7);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T8);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T9);
+
+ s->reserved_regs = 0;
tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); /* zero register */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_K0); /* kernel use only */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_K1); /* kernel use only */