X-Git-Url: https://repo.jachan.dev/qemu.git/blobdiff_plain/c4e9f845f6248885ff73a9e1ecb74052a1c3dcd4..87892c64bc9527b6eb209b41de6e6f23b786ee6e:/target/arm/translate-a64.c diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index 9dcc5ff3a3..d4bebbe629 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -27,9 +27,8 @@ #include "translate.h" #include "internals.h" #include "qemu/host-utils.h" -#include "qemu/qemu-print.h" -#include "exec/semihost.h" +#include "hw/semihosting/semihost.h" #include "exec/gen-icount.h" #include "exec/helper-proto.h" @@ -86,7 +85,7 @@ typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64); typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr); typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32); typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr); -typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, TCGMemOp); +typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp); /* initialize TCG globals. */ void a64_translate_init(void) @@ -152,133 +151,6 @@ static void set_btype(DisasContext *s, int val) s->btype = -1; } -void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) -{ - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - uint32_t psr = pstate_read(env); - int i; - int el = arm_current_el(env); - const char *ns_status; - - qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc); - for (i = 0; i < 32; i++) { - if (i == 31) { - qemu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]); - } else { - qemu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i], - (i + 2) % 3 ? " " : "\n"); - } - } - - if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) { - ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S "; - } else { - ns_status = ""; - } - qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c", - psr, - psr & PSTATE_N ? 'N' : '-', - psr & PSTATE_Z ? 'Z' : '-', - psr & PSTATE_C ? 'C' : '-', - psr & PSTATE_V ? 'V' : '-', - ns_status, - el, - psr & PSTATE_SP ? 'h' : 't'); - - if (cpu_isar_feature(aa64_bti, cpu)) { - qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10); - } - if (!(flags & CPU_DUMP_FPU)) { - qemu_fprintf(f, "\n"); - return; - } - if (fp_exception_el(env, el) != 0) { - qemu_fprintf(f, " FPU disabled\n"); - return; - } - qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n", - vfp_get_fpcr(env), vfp_get_fpsr(env)); - - if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) { - int j, zcr_len = sve_zcr_len_for_el(env, el); - - for (i = 0; i <= FFR_PRED_NUM; i++) { - bool eol; - if (i == FFR_PRED_NUM) { - qemu_fprintf(f, "FFR="); - /* It's last, so end the line. */ - eol = true; - } else { - qemu_fprintf(f, "P%02d=", i); - switch (zcr_len) { - case 0: - eol = i % 8 == 7; - break; - case 1: - eol = i % 6 == 5; - break; - case 2: - case 3: - eol = i % 3 == 2; - break; - default: - /* More than one quadword per predicate. */ - eol = true; - break; - } - } - for (j = zcr_len / 4; j >= 0; j--) { - int digits; - if (j * 4 + 4 <= zcr_len + 1) { - digits = 16; - } else { - digits = (zcr_len % 4 + 1) * 4; - } - qemu_fprintf(f, "%0*" PRIx64 "%s", digits, - env->vfp.pregs[i].p[j], - j ? ":" : eol ? "\n" : " "); - } - } - - for (i = 0; i < 32; i++) { - if (zcr_len == 0) { - qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s", - i, env->vfp.zregs[i].d[1], - env->vfp.zregs[i].d[0], i & 1 ? 
"\n" : " "); - } else if (zcr_len == 1) { - qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 - ":%016" PRIx64 ":%016" PRIx64 "\n", - i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2], - env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]); - } else { - for (j = zcr_len; j >= 0; j--) { - bool odd = (zcr_len - j) % 2 != 0; - if (j == zcr_len) { - qemu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1); - } else if (!odd) { - if (j > 0) { - qemu_fprintf(f, " [%x-%x]=", j, j - 1); - } else { - qemu_fprintf(f, " [%x]=", j); - } - } - qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s", - env->vfp.zregs[i].d[j * 2 + 1], - env->vfp.zregs[i].d[j * 2], - odd || j == 0 ? "\n" : ":"); - } - } - } - } else { - for (i = 0; i < 32; i++) { - uint64_t *q = aa64_vfp_qreg(env, i); - qemu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s", - i, q[1], q[0], (i & 1 ? "\n" : " ")); - } - } -} - void gen_a64_set_pc_im(uint64_t val) { tcg_gen_movi_i64(cpu_pc, val); @@ -381,40 +253,26 @@ static void gen_exception_internal(int excp) tcg_temp_free_i32(tcg_excp); } -static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el) +static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp) { - TCGv_i32 tcg_excp = tcg_const_i32(excp); - TCGv_i32 tcg_syn = tcg_const_i32(syndrome); - TCGv_i32 tcg_el = tcg_const_i32(target_el); - - gen_helper_exception_with_syndrome(cpu_env, tcg_excp, - tcg_syn, tcg_el); - tcg_temp_free_i32(tcg_el); - tcg_temp_free_i32(tcg_syn); - tcg_temp_free_i32(tcg_excp); -} - -static void gen_exception_internal_insn(DisasContext *s, int offset, int excp) -{ - gen_a64_set_pc_im(s->pc - offset); + gen_a64_set_pc_im(pc); gen_exception_internal(excp); s->base.is_jmp = DISAS_NORETURN; } -static void gen_exception_insn(DisasContext *s, int offset, int excp, +static void gen_exception_insn(DisasContext *s, uint64_t pc, int excp, uint32_t syndrome, uint32_t target_el) { - gen_a64_set_pc_im(s->pc - offset); + gen_a64_set_pc_im(pc); gen_exception(excp, syndrome, target_el); s->base.is_jmp = DISAS_NORETURN; } -static void gen_exception_bkpt_insn(DisasContext *s, int offset, - uint32_t syndrome) +static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome) { TCGv_i32 tcg_syn; - gen_a64_set_pc_im(s->pc - offset); + gen_a64_set_pc_im(s->pc_curr); tcg_syn = tcg_const_i32(syndrome); gen_helper_exception_bkpt_insn(cpu_env, tcg_syn); tcg_temp_free_i32(tcg_syn); @@ -433,8 +291,7 @@ static void gen_step_complete_exception(DisasContext *s) * of the exception, and our syndrome information is always correct. */ gen_ss_advance(s); - gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex), - default_exception_el(s)); + gen_swstep_exception(s, 1, s->is_ldex); s->base.is_jmp = DISAS_NORETURN; } @@ -484,7 +341,7 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest) void unallocated_encoding(DisasContext *s) { /* Unallocated and reserved encodings are uncategorized */ - gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), default_exception_el(s)); } @@ -583,7 +440,7 @@ TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf) * Dn, Sn, Hn or Bn). 
* (Note that this is not the same mapping as for A32; see cpu.h) */ -static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size) +static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size) { return vec_reg_offset(s, regno, 0, size); } @@ -704,6 +561,15 @@ static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm, vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s)); } +/* Expand a 4-operand AdvSIMD vector operation using an expander function. */ +static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm, + int rx, GVecGen4Fn *gvec_fn, int vece) +{ + gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx), + is_q ? 16 : 8, vec_full_reg_size(s)); +} + /* Expand a 2-operand + immediate AdvSIMD vector operation using * an op descriptor. */ @@ -990,7 +856,7 @@ static void do_gpr_ld_memidx(DisasContext *s, bool iss_valid, unsigned int iss_srt, bool iss_sf, bool iss_ar) { - TCGMemOp memop = s->be_data + size; + MemOp memop = s->be_data + size; g_assert(size <= 3); @@ -1067,7 +933,7 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) TCGv_i64 tmphi; if (size < 4) { - TCGMemOp memop = s->be_data + size; + MemOp memop = s->be_data + size; tmphi = tcg_const_i64(0); tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop); } else { @@ -1108,7 +974,7 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) /* Get value of an element within a vector register */ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx, - int element, TCGMemOp memop) + int element, MemOp memop) { int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE); switch (memop) { @@ -1140,7 +1006,7 @@ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx, } static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx, - int element, TCGMemOp memop) + int element, MemOp memop) { int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE); switch (memop) { @@ -1167,7 +1033,7 @@ static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx, /* Set value of an element within a vector register */ static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx, - int element, TCGMemOp memop) + int element, MemOp memop) { int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE); switch (memop) { @@ -1189,7 +1055,7 @@ static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx, } static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src, - int destidx, int element, TCGMemOp memop) + int destidx, int element, MemOp memop) { int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE); switch (memop) { @@ -1209,7 +1075,7 @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src, /* Store from vector register to memory */ static void do_vec_st(DisasContext *s, int srcidx, int element, - TCGv_i64 tcg_addr, int size, TCGMemOp endian) + TCGv_i64 tcg_addr, int size, MemOp endian) { TCGv_i64 tcg_tmp = tcg_temp_new_i64(); @@ -1221,7 +1087,7 @@ static void do_vec_st(DisasContext *s, int srcidx, int element, /* Load from memory to vector register */ static void do_vec_ld(DisasContext *s, int destidx, int element, - TCGv_i64 tcg_addr, int size, TCGMemOp endian) + TCGv_i64 tcg_addr, int size, MemOp endian) { TCGv_i64 tcg_tmp = tcg_temp_new_i64(); @@ -1247,8 +1113,8 @@ static inline 
bool fp_access_check(DisasContext *s) return true; } - gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false), - s->fp_excp_el); + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_fp_access_trap(1, 0xe, false), s->fp_excp_el); return false; } @@ -1258,7 +1124,7 @@ static inline bool fp_access_check(DisasContext *s) bool sve_access_check(DisasContext *s) { if (s->sve_excp_el) { - gen_exception_insn(s, 4, EXCP_UDEF, syn_sve_access_trap(), + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_sve_access_trap(), s->sve_excp_el); return false; } @@ -1367,11 +1233,11 @@ static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table, */ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn) { - uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4; + uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4; if (insn & (1U << 31)) { /* BL Branch with link */ - tcg_gen_movi_i64(cpu_reg(s, 30), s->pc); + tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next); } /* B Branch / BL Branch with link */ @@ -1395,7 +1261,7 @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn) sf = extract32(insn, 31, 1); op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */ rt = extract32(insn, 0, 5); - addr = s->pc + sextract32(insn, 5, 19) * 4 - 4; + addr = s->pc_curr + sextract32(insn, 5, 19) * 4; tcg_cmp = read_cpu_reg(s, rt, sf); label_match = gen_new_label(); @@ -1404,7 +1270,7 @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn) tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ, tcg_cmp, 0, label_match); - gen_goto_tb(s, 0, s->pc); + gen_goto_tb(s, 0, s->base.pc_next); gen_set_label(label_match); gen_goto_tb(s, 1, addr); } @@ -1424,7 +1290,7 @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn) bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5); op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */ - addr = s->pc + sextract32(insn, 5, 14) * 4 - 4; + addr = s->pc_curr + sextract32(insn, 5, 14) * 4; rt = extract32(insn, 0, 5); tcg_cmp = tcg_temp_new_i64(); @@ -1435,7 +1301,7 @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn) tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ, tcg_cmp, 0, label_match); tcg_temp_free_i64(tcg_cmp); - gen_goto_tb(s, 0, s->pc); + gen_goto_tb(s, 0, s->base.pc_next); gen_set_label(label_match); gen_goto_tb(s, 1, addr); } @@ -1455,7 +1321,7 @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn) unallocated_encoding(s); return; } - addr = s->pc + sextract32(insn, 5, 19) * 4 - 4; + addr = s->pc_curr + sextract32(insn, 5, 19) * 4; cond = extract32(insn, 0, 4); reset_btype(s); @@ -1463,7 +1329,7 @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn) /* genuinely conditional branches */ TCGLabel *label_match = gen_new_label(); arm_gen_test_cc(cond, label_match); - gen_goto_tb(s, 0, s->pc); + gen_goto_tb(s, 0, s->base.pc_next); gen_set_label(label_match); gen_goto_tb(s, 1, addr); } else { @@ -1624,7 +1490,7 @@ static void handle_sync(DisasContext *s, uint32_t insn, * any pending interrupts immediately. */ reset_btype(s); - gen_goto_tb(s, 0, s->pc); + gen_goto_tb(s, 0, s->base.pc_next); return; case 7: /* SB */ @@ -1636,7 +1502,7 @@ static void handle_sync(DisasContext *s, uint32_t insn, * MB and end the TB instead. 
*/ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); - gen_goto_tb(s, 0, s->pc); + gen_goto_tb(s, 0, s->base.pc_next); return; default: @@ -1839,7 +1705,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, TCGv_i32 tcg_syn, tcg_isread; uint32_t syndrome; - gen_a64_set_pc_im(s->pc - 4); + gen_a64_set_pc_im(s->pc_curr); tmpptr = tcg_const_ptr(ri); syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread); tcg_syn = tcg_const_i32(syndrome); @@ -1848,6 +1714,12 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, tcg_temp_free_ptr(tmpptr); tcg_temp_free_i32(tcg_syn); tcg_temp_free_i32(tcg_isread); + } else if (ri->type & ARM_CP_RAISES_EXC) { + /* + * The readfn or writefn might raise an exception; + * synchronize the CPU state in case it does. + */ + gen_a64_set_pc_im(s->pc_curr); } /* Handle special cases first */ @@ -1916,10 +1788,18 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) { /* I/O operations must end the TB here (whether read or write) */ - gen_io_end(); s->base.is_jmp = DISAS_UPDATE; - } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) { - /* We default to ending the TB on a coprocessor register write, + } + if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) { + /* + * A write to any coprocessor regiser that ends a TB + * must rebuild the hflags for the next TB. + */ + TCGv_i32 tcg_el = tcg_const_i32(s->current_el); + gen_helper_rebuild_hflags_a64(cpu_env, tcg_el); + tcg_temp_free_i32(tcg_el); + /* + * We default to ending the TB on a coprocessor register write, * but allow this to be suppressed by the register definition * (usually only necessary to work around guest bugs). */ @@ -1992,8 +1872,8 @@ static void disas_exc(DisasContext *s, uint32_t insn) switch (op2_ll) { case 1: /* SVC */ gen_ss_advance(s); - gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16), - default_exception_el(s)); + gen_exception_insn(s, s->base.pc_next, EXCP_SWI, + syn_aa64_svc(imm16), default_exception_el(s)); break; case 2: /* HVC */ if (s->current_el == 0) { @@ -2003,22 +1883,24 @@ static void disas_exc(DisasContext *s, uint32_t insn) /* The pre HVC helper handles cases when HVC gets trapped * as an undefined insn by runtime configuration. 
*/ - gen_a64_set_pc_im(s->pc - 4); + gen_a64_set_pc_im(s->pc_curr); gen_helper_pre_hvc(cpu_env); gen_ss_advance(s); - gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2); + gen_exception_insn(s, s->base.pc_next, EXCP_HVC, + syn_aa64_hvc(imm16), 2); break; case 3: /* SMC */ if (s->current_el == 0) { unallocated_encoding(s); break; } - gen_a64_set_pc_im(s->pc - 4); + gen_a64_set_pc_im(s->pc_curr); tmp = tcg_const_i32(syn_aa64_smc(imm16)); gen_helper_pre_smc(cpu_env, tmp); tcg_temp_free_i32(tmp); gen_ss_advance(s); - gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3); + gen_exception_insn(s, s->base.pc_next, EXCP_SMC, + syn_aa64_smc(imm16), 3); break; default: unallocated_encoding(s); @@ -2031,7 +1913,7 @@ static void disas_exc(DisasContext *s, uint32_t insn) break; } /* BRK */ - gen_exception_bkpt_insn(s, 4, syn_aa64_bkpt(imm16)); + gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16)); break; case 2: if (op2_ll != 0) { @@ -2055,7 +1937,7 @@ static void disas_exc(DisasContext *s, uint32_t insn) break; } #endif - gen_exception_internal_insn(s, 0, EXCP_SEMIHOST); + gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST); } else { unsupported_encoding(s, insn); } @@ -2148,7 +2030,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) gen_a64_set_pc(s, dst); /* BLR also needs to load return address */ if (opc == 1) { - tcg_gen_movi_i64(cpu_reg(s, 30), s->pc); + tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next); } break; @@ -2175,7 +2057,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) gen_a64_set_pc(s, dst); /* BLRAA also needs to load return address */ if (opc == 9) { - tcg_gen_movi_i64(cpu_reg(s, 30), s->pc); + tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next); } break; @@ -2223,9 +2105,6 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) gen_helper_exception_return(cpu_env, dst); tcg_temp_free_i64(dst); - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } /* Must exit loop to check un-masked IRQs */ s->base.is_jmp = DISAS_EXIT; return; @@ -2319,7 +2198,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, TCGv_i64 addr, int size, bool is_pair) { int idx = get_mem_index(s); - TCGMemOp memop = s->be_data; + MemOp memop = s->be_data; g_assert(size <= 3); if (is_pair) { @@ -2734,7 +2613,7 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn) tcg_rt = cpu_reg(s, rt); - clean_addr = tcg_const_i64((s->pc - 4) + imm); + clean_addr = tcg_const_i64(s->pc_curr + imm); if (is_vector) { do_fp_ld(s, rt, clean_addr, size); } else { @@ -3405,7 +3284,7 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) bool is_postidx = extract32(insn, 23, 1); bool is_q = extract32(insn, 30, 1); TCGv_i64 clean_addr, tcg_rn, tcg_ebytes; - TCGMemOp endian = s->be_data; + MemOp endian = s->be_data; int ebytes; /* bytes per element */ int elements; /* elements per vector */ @@ -3713,7 +3592,7 @@ static void disas_pc_rel_adr(DisasContext *s, uint32_t insn) offset = sextract64(insn, 5, 19); offset = offset << 2 | extract32(insn, 29, 2); rd = extract32(insn, 0, 5); - base = s->pc - 4; + base = s->pc_curr; if (page) { /* ADRP (page based) */ @@ -4043,8 +3922,8 @@ static void disas_bitfield(DisasContext *s, uint32_t insn) tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len); return; } - /* opc == 1, BXFIL fall through to deposit */ - tcg_gen_extract_i64(tcg_tmp, tcg_tmp, ri, len); + /* opc == 1, BFXIL fall through to deposit */ + tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri); pos = 0; } else { /* Handle the ri > si case with a 
deposit @@ -4062,7 +3941,7 @@ static void disas_bitfield(DisasContext *s, uint32_t insn) len = ri; } - if (opc == 1) { /* BFM, BXFIL */ + if (opc == 1) { /* BFM, BFXIL */ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len); } else { /* SBFM or UBFM: We start with zero, and we haven't modified @@ -4114,25 +3993,27 @@ static void disas_extract(DisasContext *s, uint32_t insn) } else { tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm)); } - } else if (rm == rn) { /* ROR */ + } else { tcg_rm = cpu_reg(s, rm); + tcg_rn = cpu_reg(s, rn); + if (sf) { - tcg_gen_rotri_i64(tcg_rd, tcg_rm, imm); + /* Specialization to ROR happens in EXTRACT2. */ + tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, imm); } else { - TCGv_i32 tmp = tcg_temp_new_i32(); - tcg_gen_extrl_i64_i32(tmp, tcg_rm); - tcg_gen_rotri_i32(tmp, tmp, imm); - tcg_gen_extu_i32_i64(tcg_rd, tmp); - tcg_temp_free_i32(tmp); - } - } else { - tcg_rm = read_cpu_reg(s, rm, sf); - tcg_rn = read_cpu_reg(s, rn, sf); - tcg_gen_shri_i64(tcg_rm, tcg_rm, imm); - tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm); - tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn); - if (!sf) { - tcg_gen_ext32u_i64(tcg_rd, tcg_rd); + TCGv_i32 t0 = tcg_temp_new_i32(); + + tcg_gen_extrl_i64_i32(t0, tcg_rm); + if (rm == rn) { + tcg_gen_rotri_i32(t0, t0, imm); + } else { + TCGv_i32 t1 = tcg_temp_new_i32(); + tcg_gen_extrl_i64_i32(t1, tcg_rn); + tcg_gen_extract2_i32(t0, t0, t1, imm); + tcg_temp_free_i32(t1); + } + tcg_gen_extu_i32_i64(tcg_rd, t0); + tcg_temp_free_i32(t0); } } } @@ -5572,7 +5453,7 @@ static void disas_fp_csel(DisasContext *s, uint32_t insn) unsigned int mos, type, rm, cond, rn, rd; TCGv_i64 t_true, t_false, t_zero; DisasCompare64 c; - TCGMemOp sz; + MemOp sz; mos = extract32(insn, 29, 3); type = extract32(insn, 22, 2); @@ -6369,38 +6250,6 @@ static void disas_fp_3src(DisasContext *s, uint32_t insn) } } -/* The imm8 encodes the sign bit, enough bits to represent an exponent in - * the range 01....1xx to 10....0xx, and the most significant 4 bits of - * the mantissa; see VFPExpandImm() in the v8 ARM ARM. - */ -uint64_t vfp_expand_imm(int size, uint8_t imm8) -{ - uint64_t imm; - - switch (size) { - case MO_64: - imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) | - (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) | - extract32(imm8, 0, 6); - imm <<= 48; - break; - case MO_32: - imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) | - (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) | - (extract32(imm8, 0, 6) << 3); - imm <<= 16; - break; - case MO_16: - imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) | - (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) | - (extract32(imm8, 0, 6) << 6); - break; - default: - g_assert_not_reached(); - } - return imm; -} - /* Floating point immediate * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0 * +---+---+---+-----------+------+---+------------+-------+------+------+ @@ -6416,7 +6265,7 @@ static void disas_fp_imm(DisasContext *s, uint32_t insn) int mos = extract32(insn, 29, 3); uint64_t imm; TCGv_i64 tcg_res; - TCGMemOp sz; + MemOp sz; if (mos || imm5) { unallocated_encoding(s); @@ -7179,7 +7028,7 @@ static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn, { if (esize == size) { int element; - TCGMemOp msize = esize == 16 ? MO_16 : MO_32; + MemOp msize = esize == 16 ? MO_16 : MO_32; TCGv_i32 tcg_elem; /* We should have one register left here */ @@ -8171,7 +8020,7 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, int shift = (2 * esize) - immhb; int elements = is_scalar ? 
1 : (64 / esize); bool round = extract32(opcode, 0, 1); - TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN); + MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN); TCGv_i64 tcg_rn, tcg_rd, tcg_round; TCGv_i32 tcg_rd_narrowed; TCGv_i64 tcg_final; @@ -8330,7 +8179,7 @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q, } }; NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size]; - TCGMemOp memop = scalar ? size : MO_32; + MemOp memop = scalar ? size : MO_32; int maxpass = scalar ? 1 : is_q ? 4 : 2; for (pass = 0; pass < maxpass; pass++) { @@ -8374,7 +8223,7 @@ static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn, TCGv_ptr tcg_fpst = get_fpstatus_ptr(size == MO_16); TCGv_i32 tcg_shift = NULL; - TCGMemOp mop = size | (is_signed ? MO_SIGN : 0); + MemOp mop = size | (is_signed ? MO_SIGN : 0); int pass; if (fracbits || size == MO_64) { @@ -9468,11 +9317,7 @@ static void handle_2misc_64(DisasContext *s, int opcode, bool u, if (u) { tcg_gen_neg_i64(tcg_rd, tcg_rn); } else { - TCGv_i64 tcg_zero = tcg_const_i64(0); - tcg_gen_neg_i64(tcg_rd, tcg_rn); - tcg_gen_movcond_i64(TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero, - tcg_rn, tcg_rd); - tcg_temp_free_i64(tcg_zero); + tcg_gen_abs_i64(tcg_rd, tcg_rn); } break; case 0x2f: /* FABS */ @@ -10157,7 +10002,7 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u, int dsize = is_q ? 128 : 64; int esize = 8 << size; int elements = dsize/esize; - TCGMemOp memop = size | (is_u ? 0 : MO_SIGN); + MemOp memop = size | (is_u ? 0 : MO_SIGN); TCGv_i64 tcg_rn = new_tmp_a64(s); TCGv_i64 tcg_rd = new_tmp_a64(s); TCGv_i64 tcg_round; @@ -10500,7 +10345,7 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, TCGv_i64 tcg_op1 = tcg_temp_new_i64(); TCGv_i64 tcg_op2 = tcg_temp_new_i64(); TCGv_i64 tcg_passres; - TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN); + MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN); int elt = pass + is_q * 2; @@ -10920,13 +10765,13 @@ static void disas_simd_3same_logic(DisasContext *s, uint32_t insn) return; case 5: /* BSL bitwise select */ - gen_gvec_op3(s, is_q, rd, rn, rm, &bsl_op); + gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0); return; case 6: /* BIT, bitwise insert if true */ - gen_gvec_op3(s, is_q, rd, rn, rm, &bit_op); + gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0); return; case 7: /* BIF, bitwise insert if false */ - gen_gvec_op3(s, is_q, rd, rn, rm, &bif_op); + gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0); return; default: @@ -11686,7 +11531,7 @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn) break; default: fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n", - __func__, insn, fpopcode, s->pc); + __func__, insn, fpopcode, s->pc_curr); g_assert_not_reached(); } @@ -11980,7 +11825,7 @@ static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u, if (size == 2) { /* 32 + 32 -> 64 op */ - TCGMemOp memop = size + (u ? 0 : MO_SIGN); + MemOp memop = size + (u ? 
0 : MO_SIGN); for (pass = 0; pass < maxpass; pass++) { TCGv_i64 tcg_op1 = tcg_temp_new_i64(); @@ -12366,11 +12211,12 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) } break; case 0xb: - if (u) { /* NEG */ + if (u) { /* ABS, NEG */ gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size); - return; + } else { + gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size); } - break; + return; } if (size == 3) { @@ -12438,17 +12284,6 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op); } break; - case 0xb: /* ABS, NEG */ - if (u) { - tcg_gen_neg_i32(tcg_res, tcg_op); - } else { - TCGv_i32 tcg_zero = tcg_const_i32(0); - tcg_gen_neg_i32(tcg_res, tcg_op); - tcg_gen_movcond_i32(TCG_COND_GT, tcg_res, tcg_op, - tcg_zero, tcg_op, tcg_res); - tcg_temp_free_i32(tcg_zero); - } - break; case 0x2f: /* FABS */ gen_helper_vfp_abss(tcg_res, tcg_op); break; @@ -12561,23 +12396,6 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) tcg_temp_free_i32(tcg_zero); break; } - case 0xb: /* ABS, NEG */ - if (u) { - TCGv_i32 tcg_zero = tcg_const_i32(0); - if (size) { - gen_helper_neon_sub_u16(tcg_res, tcg_zero, tcg_op); - } else { - gen_helper_neon_sub_u8(tcg_res, tcg_zero, tcg_op); - } - tcg_temp_free_i32(tcg_zero); - } else { - if (size) { - gen_helper_neon_abs_s16(tcg_res, tcg_op); - } else { - gen_helper_neon_abs_s8(tcg_res, tcg_op); - } - } - break; case 0x4: /* CLS, CLZ */ if (u) { if (size == 0) { @@ -13029,7 +12847,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) switch (is_fp) { case 1: /* normal fp */ - /* convert insn encoded size to TCGMemOp size */ + /* convert insn encoded size to MemOp size */ switch (size) { case 0: /* half-precision */ size = MO_16; @@ -13077,7 +12895,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) return; } - /* Given TCGMemOp size, adjust register and indexing. */ + /* Given MemOp size, adjust register and indexing. */ switch (size) { case MO_16: index = h << 2 | l << 1 | m; @@ -13374,7 +13192,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) TCGv_i64 tcg_res[2]; int pass; bool satop = extract32(opcode, 0, 1); - TCGMemOp memop = MO_32; + MemOp memop = MO_32; if (satop || !u) { memop |= MO_SIGN; @@ -14163,7 +13981,7 @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s) * table entry even for that case. 
*/ return (tlb_hit(entry->addr_code, addr) && - env->iotlb[mmu_idx][index].attrs.target_tlb_bit0); + env_tlb(env)->d[mmu_idx].iotlb[index].attrs.target_tlb_bit0); #endif } @@ -14224,9 +14042,10 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s) { uint32_t insn; - insn = arm_ldl_code(env, s->pc, s->sctlr_b); + s->pc_curr = s->base.pc_next; + insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b); s->insn = insn; - s->pc += 4; + s->base.pc_next += 4; s->fp_access_checked = false; @@ -14257,7 +14076,8 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s) if (s->btype != 0 && s->guarded_page && !btype_destination_ok(insn, s->bt, s->btype)) { - gen_exception_insn(s, 4, EXCP_UDEF, syn_btitrap(s->btype), + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_btitrap(s->btype), default_exception_el(s)); return; } @@ -14318,12 +14138,11 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, { DisasContext *dc = container_of(dcbase, DisasContext, base); CPUARMState *env = cpu->env_ptr; - ARMCPU *arm_cpu = arm_env_get_cpu(env); + ARMCPU *arm_cpu = env_archcpu(env); uint32_t tb_flags = dc->base.tb->flags; int bound, core_mmu_idx; dc->isar = &arm_cpu->isar; - dc->pc = dc->base.pc_first; dc->condjmp = 0; dc->aarch64 = 1; @@ -14374,7 +14193,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE); dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS); dc->is_ldex = false; - dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el); + dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL); /* Bound the number of insns to execute to those left on the page. */ bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4; @@ -14396,7 +14215,7 @@ static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); - tcg_gen_insn_start(dc->pc, 0, 0); + tcg_gen_insn_start(dc->base.pc_next, 0, 0); dc->insn_start = tcg_last_op(); } @@ -14406,18 +14225,18 @@ static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu, DisasContext *dc = container_of(dcbase, DisasContext, base); if (bp->flags & BP_CPU) { - gen_a64_set_pc_im(dc->pc); + gen_a64_set_pc_im(dc->base.pc_next); gen_helper_check_breakpoints(cpu_env); /* End the TB early; it likely won't be executed */ dc->base.is_jmp = DISAS_TOO_MANY; } else { - gen_exception_internal_insn(dc, 0, EXCP_DEBUG); + gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG); /* The address covered by the breakpoint must be included in [tb->pc, tb->pc + tb->size) in order to for it to be properly cleared -- thus we increment the PC here so that the logic setting tb->size below does the right thing. */ - dc->pc += 4; + dc->base.pc_next += 4; dc->base.is_jmp = DISAS_NORETURN; } @@ -14441,14 +14260,12 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) * bits should be zero. 
*/ assert(dc->base.num_insns == 1); - gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0), - default_exception_el(dc)); + gen_swstep_exception(dc, 0, 0); dc->base.is_jmp = DISAS_NORETURN; } else { disas_a64_insn(env, dc); } - dc->base.pc_next = dc->pc; translator_loop_temp_check(&dc->base); } @@ -14464,7 +14281,7 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) */ switch (dc->base.is_jmp) { default: - gen_a64_set_pc_im(dc->pc); + gen_a64_set_pc_im(dc->base.pc_next); /* fall through */ case DISAS_EXIT: case DISAS_JUMP: @@ -14481,11 +14298,11 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) switch (dc->base.is_jmp) { case DISAS_NEXT: case DISAS_TOO_MANY: - gen_goto_tb(dc, 1, dc->pc); + gen_goto_tb(dc, 1, dc->base.pc_next); break; default: case DISAS_UPDATE: - gen_a64_set_pc_im(dc->pc); + gen_a64_set_pc_im(dc->base.pc_next); /* fall through */ case DISAS_EXIT: tcg_gen_exit_tb(NULL, 0); @@ -14497,11 +14314,11 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) case DISAS_SWI: break; case DISAS_WFE: - gen_a64_set_pc_im(dc->pc); + gen_a64_set_pc_im(dc->base.pc_next); gen_helper_wfe(cpu_env); break; case DISAS_YIELD: - gen_a64_set_pc_im(dc->pc); + gen_a64_set_pc_im(dc->base.pc_next); gen_helper_yield(cpu_env); break; case DISAS_WFI: @@ -14511,7 +14328,7 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) */ TCGv_i32 tmp = tcg_const_i32(4); - gen_a64_set_pc_im(dc->pc); + gen_a64_set_pc_im(dc->base.pc_next); gen_helper_wfi(cpu_env, tmp); tcg_temp_free_i32(tmp); /* The helper doesn't necessarily throw an exception, but we @@ -14522,9 +14339,6 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) } } } - - /* Functions above can change dc->pc, so re-align db->pc_next */ - dc->base.pc_next = dc->pc; } static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
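
Two of the hunks above are easier to follow with the underlying bit-level operation spelled out. The BSL/BIT/BIF change folds three bespoke gvec ops onto a single tcg_gen_gvec_bitsel() call issued through the new gen_gvec_fn4() expander; the three instructions differ only in how Vd, Vn and Vm are routed to the selector and the two data operands of that one primitive. A minimal standalone C sketch of the selection (the bitsel() helper and the test values are illustrative; only the operand orderings are taken from the hunk):

#include <inttypes.h>
#include <stdio.h>

/* Per-bit select: the result takes bits from b where sel is 1 and bits
 * from c where sel is 0 -- the operation tcg_gen_gvec_bitsel() expands
 * per vector element.
 */
static uint64_t bitsel(uint64_t sel, uint64_t b, uint64_t c)
{
    return (b & sel) | (c & ~sel);
}

int main(void)
{
    uint64_t vd = 0x00ff00ff00ff00ffULL;    /* previous destination value */
    uint64_t vn = 0x1111222233334444ULL;
    uint64_t vm = 0x5555666677778888ULL;

    /* Operand orderings used by the gen_gvec_fn4() calls in the diff: */
    printf("BSL %016" PRIx64 "\n", bitsel(vd, vn, vm)); /* select by old Vd  */
    printf("BIT %016" PRIx64 "\n", bitsel(vm, vn, vd)); /* Vn where Vm set   */
    printf("BIF %016" PRIx64 "\n", bitsel(vm, vd, vn)); /* Vn where Vm clear */
    return 0;
}

Read this way, the three cases are one operation with its operands permuted, which is why a single 4-operand expander suffices.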
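
The disas_extract() rewrite likewise reimplements EXTR on top of tcg_gen_extract2_i64()/tcg_gen_extract2_i32(), with ROR falling out as the Rn == Rm special case. A short sketch of the 64-bit behaviour under the same caveats (extract2_64() and the inputs are illustrative):

#include <inttypes.h>
#include <stdio.h>

/* Take 64 bits out of the 128-bit concatenation hi:lo starting at bit ofs,
 * as tcg_gen_extract2_i64() does for a non-zero shift amount.
 */
static uint64_t extract2_64(uint64_t lo, uint64_t hi, unsigned ofs)
{
    return ofs ? (lo >> ofs) | (hi << (64 - ofs)) : lo;
}

int main(void)
{
    uint64_t rn = 0x0123456789abcdefULL;
    uint64_t rm = 0xfedcba9876543210ULL;

    /* EXTR Xd, Xn, Xm, #imm: bits imm..imm+63 of the pair Xn:Xm */
    printf("EXTR #8 -> %016" PRIx64 "\n", extract2_64(rm, rn, 8));

    /* With Xn == Xm the pair is Xn:Xn, so the extract is a rotate right,
     * which is the "specialization to ROR" the new comment refers to. */
    printf("ROR  #8 -> %016" PRIx64 "\n", extract2_64(rn, rn, 8));
    return 0;
}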