X-Git-Url: https://repo.jachan.dev/qemu.git/blobdiff_plain/a056c9faa4a0bd790630caac4ff9f5a841a33177..7931b05987564b07ada5a4467d8e78a786a3e7d4:/tcg/aarch64/tcg-target.c

diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index 7ff4be7663..0a580b65ea 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -102,11 +102,10 @@ static inline void patch_reloc(uint8_t *code_ptr, int type,
     }
 }
 
-#define TCG_CT_CONST_IS32 0x100
-#define TCG_CT_CONST_AIMM 0x200
-#define TCG_CT_CONST_LIMM 0x400
-#define TCG_CT_CONST_ZERO 0x800
-#define TCG_CT_CONST_MONE 0x1000
+#define TCG_CT_CONST_AIMM 0x100
+#define TCG_CT_CONST_LIMM 0x200
+#define TCG_CT_CONST_ZERO 0x400
+#define TCG_CT_CONST_MONE 0x800
 
 /* parse target specific constraints */
 static int target_parse_constraint(TCGArgConstraint *ct,
@@ -131,9 +130,6 @@ static int target_parse_constraint(TCGArgConstraint *ct,
         tcg_regset_reset_reg(ct->u.regs, TCG_REG_X3);
 #endif
         break;
-    case 'w': /* The operand should be considered 32-bit. */
-        ct->ct |= TCG_CT_CONST_IS32;
-        break;
     case 'A': /* Valid for arithmetic immediate (positive or negative). */
         ct->ct |= TCG_CT_CONST_AIMM;
         break;
@@ -180,7 +176,7 @@ static inline bool is_limm(uint64_t val)
     return (val & (val - 1)) == 0;
 }
 
-static int tcg_target_const_match(tcg_target_long val,
+static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                   const TCGArgConstraint *arg_ct)
 {
     int ct = arg_ct->ct;
@@ -188,7 +184,7 @@ static int tcg_target_const_match(tcg_target_long val,
     if (ct & TCG_CT_CONST) {
         return 1;
     }
-    if (ct & TCG_CT_CONST_IS32) {
+    if (type == TCG_TYPE_I32) {
         val = (int32_t)val;
     }
     if ((ct & TCG_CT_CONST_AIMM) && (is_aimm(val) || is_aimm(-val))) {
@@ -979,9 +975,7 @@ static inline void tcg_out_addsub2(TCGContext *s, int ext, TCGReg rl,
     }
     tcg_out_insn_3503(s, insn, ext, rh, ah, bh);
 
-    if (rl != orig_rl) {
-        tcg_out_movr(s, ext, orig_rl, rl);
-    }
+    tcg_out_mov(s, ext, orig_rl, rl);
 }
 
 #ifdef CONFIG_SOFTMMU
@@ -1025,15 +1019,15 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 
     reloc_pc19(lb->label_ptr[0], (intptr_t)s->code_ptr);
 
-    tcg_out_movr(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0);
-    tcg_out_movr(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
+    tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0);
+    tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, lb->mem_index);
     tcg_out_adr(s, TCG_REG_X3, (intptr_t)lb->raddr);
     tcg_out_call(s, (intptr_t)qemu_ld_helpers[opc & ~MO_SIGN]);
     if (opc & MO_SIGN) {
         tcg_out_sxt(s, TCG_TYPE_I64, size, lb->datalo_reg, TCG_REG_X0);
     } else {
-        tcg_out_movr(s, TCG_TYPE_I64, lb->datalo_reg, TCG_REG_X0);
+        tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0);
     }
 
     tcg_out_goto(s, (intptr_t)lb->raddr);
@@ -1046,16 +1040,16 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 
     reloc_pc19(lb->label_ptr[0], (intptr_t)s->code_ptr);
 
-    tcg_out_movr(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0);
-    tcg_out_movr(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
-    tcg_out_movr(s, size == MO_64, TCG_REG_X2, lb->datalo_reg);
+    tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0);
+    tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
+    tcg_out_mov(s, size == MO_64, TCG_REG_X2, lb->datalo_reg);
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, lb->mem_index);
     tcg_out_adr(s, TCG_REG_X4, (intptr_t)lb->raddr);
     tcg_out_call(s, (intptr_t)qemu_st_helpers[opc]);
     tcg_out_goto(s, (intptr_t)lb->raddr);
 }
 
-static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
                                 TCGReg data_reg, TCGReg addr_reg,
                                 int mem_index,
                                 uint8_t *raddr, uint8_t *label_ptr)
@@ -1228,7 +1222,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
 
     tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 1);
     tcg_out_qemu_ld_direct(s, memop, data_reg, addr_reg, TCG_REG_X1);
-    add_qemu_ldst_label(s, 1, memop, data_reg, addr_reg,
+    add_qemu_ldst_label(s, true, memop, data_reg, addr_reg,
                         mem_index, s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
     tcg_out_qemu_ld_direct(s, memop, data_reg, addr_reg,
@@ -1245,7 +1239,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
 
     tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 0);
     tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, TCG_REG_X1);
-    add_qemu_ldst_label(s, 0, memop, data_reg, addr_reg,
+    add_qemu_ldst_label(s, false, memop, data_reg, addr_reg,
                         mem_index, s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
     tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg,
@@ -1665,9 +1659,9 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
     { INDEX_op_st32_i64, { "rZ", "r" } },
     { INDEX_op_st_i64, { "rZ", "r" } },
 
-    { INDEX_op_add_i32, { "r", "r", "rwA" } },
+    { INDEX_op_add_i32, { "r", "r", "rA" } },
     { INDEX_op_add_i64, { "r", "r", "rA" } },
-    { INDEX_op_sub_i32, { "r", "r", "rwA" } },
+    { INDEX_op_sub_i32, { "r", "r", "rA" } },
     { INDEX_op_sub_i64, { "r", "r", "rA" } },
     { INDEX_op_mul_i32, { "r", "r", "r" } },
     { INDEX_op_mul_i64, { "r", "r", "r" } },
@@ -1679,17 +1673,17 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
     { INDEX_op_rem_i64, { "r", "r", "r" } },
     { INDEX_op_remu_i32, { "r", "r", "r" } },
     { INDEX_op_remu_i64, { "r", "r", "r" } },
-    { INDEX_op_and_i32, { "r", "r", "rwL" } },
+    { INDEX_op_and_i32, { "r", "r", "rL" } },
     { INDEX_op_and_i64, { "r", "r", "rL" } },
-    { INDEX_op_or_i32, { "r", "r", "rwL" } },
+    { INDEX_op_or_i32, { "r", "r", "rL" } },
     { INDEX_op_or_i64, { "r", "r", "rL" } },
-    { INDEX_op_xor_i32, { "r", "r", "rwL" } },
+    { INDEX_op_xor_i32, { "r", "r", "rL" } },
     { INDEX_op_xor_i64, { "r", "r", "rL" } },
-    { INDEX_op_andc_i32, { "r", "r", "rwL" } },
+    { INDEX_op_andc_i32, { "r", "r", "rL" } },
     { INDEX_op_andc_i64, { "r", "r", "rL" } },
-    { INDEX_op_orc_i32, { "r", "r", "rwL" } },
+    { INDEX_op_orc_i32, { "r", "r", "rL" } },
     { INDEX_op_orc_i64, { "r", "r", "rL" } },
-    { INDEX_op_eqv_i32, { "r", "r", "rwL" } },
+    { INDEX_op_eqv_i32, { "r", "r", "rL" } },
     { INDEX_op_eqv_i64, { "r", "r", "rL" } },
 
     { INDEX_op_neg_i32, { "r", "r" } },
@@ -1708,11 +1702,11 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
     { INDEX_op_rotl_i64, { "r", "r", "ri" } },
     { INDEX_op_rotr_i64, { "r", "r", "ri" } },
 
-    { INDEX_op_brcond_i32, { "r", "rwA" } },
+    { INDEX_op_brcond_i32, { "r", "rA" } },
     { INDEX_op_brcond_i64, { "r", "rA" } },
-    { INDEX_op_setcond_i32, { "r", "r", "rwA" } },
+    { INDEX_op_setcond_i32, { "r", "r", "rA" } },
     { INDEX_op_setcond_i64, { "r", "r", "rA" } },
-    { INDEX_op_movcond_i32, { "r", "r", "rwA", "rZ", "rZ" } },
+    { INDEX_op_movcond_i32, { "r", "r", "rA", "rZ", "rZ" } },
     { INDEX_op_movcond_i64, { "r", "r", "rA", "rZ", "rZ" } },
 
     { INDEX_op_qemu_ld_i32, { "r", "l" } },
@@ -1741,9 +1735,9 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
     { INDEX_op_deposit_i32, { "r", "0", "rZ" } },
     { INDEX_op_deposit_i64, { "r", "0", "rZ" } },
 
-    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rwA", "rwMZ" } },
+    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rA", "rMZ" } },
     { INDEX_op_add2_i64, { "r", "r", "rZ", "rZ", "rA", "rMZ" } },
-    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rwA", "rwMZ" } },
+    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rA", "rMZ" } },
     { INDEX_op_sub2_i64, { "r", "r", "rZ", "rZ", "rA", "rMZ" } },
 
     { INDEX_op_muluh_i64, { "r", "r", "r" } },