#include "translate.h"
#include "internals.h"
#include "qemu/host-utils.h"
-#include "qemu/qemu-print.h"
-#include "exec/semihost.h"
+#include "hw/semihosting/semihost.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
-typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, TCGMemOp);
+typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
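The MemOp in this typedef is what lets the atomic-memory-op decoder dispatch through one function pointer: on a 64-bit target TCGv is TCGv_i64, so the stock tcg_gen_atomic_fetch_* generators match AtomicThreeOpFn exactly. A sketch of such a dispatch (the table itself is illustrative, not code from this patch; the generator names are real TCG API):

    /* One matching generator per LDOPx opcode; LDCLR inverts the
     * value first and then uses fetch_and.
     */
    static AtomicThreeOpFn * const atomic_fns[] = {
        tcg_gen_atomic_fetch_add_i64,  /* LDADD */
        tcg_gen_atomic_fetch_and_i64,  /* LDCLR */
        tcg_gen_atomic_fetch_xor_i64,  /* LDEOR */
        tcg_gen_atomic_fetch_or_i64,   /* LDSET */
    };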
/* initialize TCG globals. */
void a64_translate_init(void)
s->btype = -1;
}
-void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- uint32_t psr = pstate_read(env);
- int i;
- int el = arm_current_el(env);
- const char *ns_status;
-
- qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
- for (i = 0; i < 32; i++) {
- if (i == 31) {
- qemu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
- } else {
- qemu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
- (i + 2) % 3 ? " " : "\n");
- }
- }
-
- if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
- ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
- } else {
- ns_status = "";
- }
- qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
- psr,
- psr & PSTATE_N ? 'N' : '-',
- psr & PSTATE_Z ? 'Z' : '-',
- psr & PSTATE_C ? 'C' : '-',
- psr & PSTATE_V ? 'V' : '-',
- ns_status,
- el,
- psr & PSTATE_SP ? 'h' : 't');
-
- if (cpu_isar_feature(aa64_bti, cpu)) {
- qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
- }
- if (!(flags & CPU_DUMP_FPU)) {
- qemu_fprintf(f, "\n");
- return;
- }
- if (fp_exception_el(env, el) != 0) {
- qemu_fprintf(f, " FPU disabled\n");
- return;
- }
- qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
- vfp_get_fpcr(env), vfp_get_fpsr(env));
-
- if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
- int j, zcr_len = sve_zcr_len_for_el(env, el);
-
- for (i = 0; i <= FFR_PRED_NUM; i++) {
- bool eol;
- if (i == FFR_PRED_NUM) {
- qemu_fprintf(f, "FFR=");
- /* It's last, so end the line. */
- eol = true;
- } else {
- qemu_fprintf(f, "P%02d=", i);
- switch (zcr_len) {
- case 0:
- eol = i % 8 == 7;
- break;
- case 1:
- eol = i % 6 == 5;
- break;
- case 2:
- case 3:
- eol = i % 3 == 2;
- break;
- default:
- /* More than one quadword per predicate. */
- eol = true;
- break;
- }
- }
- for (j = zcr_len / 4; j >= 0; j--) {
- int digits;
- if (j * 4 + 4 <= zcr_len + 1) {
- digits = 16;
- } else {
- digits = (zcr_len % 4 + 1) * 4;
- }
- qemu_fprintf(f, "%0*" PRIx64 "%s", digits,
- env->vfp.pregs[i].p[j],
- j ? ":" : eol ? "\n" : " ");
- }
- }
-
- for (i = 0; i < 32; i++) {
- if (zcr_len == 0) {
- qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
- i, env->vfp.zregs[i].d[1],
- env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
- } else if (zcr_len == 1) {
- qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64
- ":%016" PRIx64 ":%016" PRIx64 "\n",
- i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2],
- env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]);
- } else {
- for (j = zcr_len; j >= 0; j--) {
- bool odd = (zcr_len - j) % 2 != 0;
- if (j == zcr_len) {
- qemu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1);
- } else if (!odd) {
- if (j > 0) {
- qemu_fprintf(f, " [%x-%x]=", j, j - 1);
- } else {
- qemu_fprintf(f, " [%x]=", j);
- }
- }
- qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
- env->vfp.zregs[i].d[j * 2 + 1],
- env->vfp.zregs[i].d[j * 2],
- odd || j == 0 ? "\n" : ":");
- }
- }
- }
- } else {
- for (i = 0; i < 32; i++) {
- uint64_t *q = aa64_vfp_qreg(env, i);
- qemu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
- i, q[1], q[0], (i & 1 ? "\n" : " "));
- }
- }
-}
-
void gen_a64_set_pc_im(uint64_t val)
{
tcg_gen_movi_i64(cpu_pc, val);
tcg_temp_free_i32(tcg_excp);
}
-static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
+static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
{
- TCGv_i32 tcg_excp = tcg_const_i32(excp);
- TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
- TCGv_i32 tcg_el = tcg_const_i32(target_el);
-
- gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
- tcg_syn, tcg_el);
- tcg_temp_free_i32(tcg_el);
- tcg_temp_free_i32(tcg_syn);
- tcg_temp_free_i32(tcg_excp);
-}
-
-static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
-{
- gen_a64_set_pc_im(s->pc - offset);
+ gen_a64_set_pc_im(pc);
gen_exception_internal(excp);
s->base.is_jmp = DISAS_NORETURN;
}
-static void gen_exception_insn(DisasContext *s, int offset, int excp,
+static void gen_exception_insn(DisasContext *s, uint64_t pc, int excp,
uint32_t syndrome, uint32_t target_el)
{
- gen_a64_set_pc_im(s->pc - offset);
+ gen_a64_set_pc_im(pc);
gen_exception(excp, syndrome, target_el);
s->base.is_jmp = DISAS_NORETURN;
}
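Throughout this patch the context no longer carries its own s->pc; s->pc_curr names the address of the instruction being decoded and s->base.pc_next the one after it, so the exception generators take an absolute PC instead of an offset to subtract. The two call styles, both taken verbatim from hunks later in this patch:

    /* Fault reported on the offending instruction itself: */
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));

    /* SVC reports the *next* instruction, its return address: */
    gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
                       syn_aa64_svc(imm16), default_exception_el(s));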
-static void gen_exception_bkpt_insn(DisasContext *s, int offset,
- uint32_t syndrome)
+static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
TCGv_i32 tcg_syn;
- gen_a64_set_pc_im(s->pc - offset);
+ gen_a64_set_pc_im(s->pc_curr);
tcg_syn = tcg_const_i32(syndrome);
gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
tcg_temp_free_i32(tcg_syn);
* of the exception, and our syndrome information is always correct.
*/
gen_ss_advance(s);
- gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
- default_exception_el(s));
+ gen_swstep_exception(s, 1, s->is_ldex);
s->base.is_jmp = DISAS_NORETURN;
}
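gen_swstep_exception folds the software-step syndrome and target EL into one helper driven by the new debug_target_el field (installed from TB flags further down, replacing the cached ss_same_el). Its definition is not part of this file; a sketch of what it must do, assuming it sits in translate.h with access to the DisasContext fields used here:

    /*
     * Sketch: generate an architectural single-step exception.
     * The same-EL syndrome bit is derived from debug_target_el
     * rather than stored separately.
     */
    static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
    {
        bool same_el = (s->debug_target_el == s->current_el);

        gen_exception(EXCP_UDEF, syn_swstep(same_el, isv, ex),
                      s->debug_target_el);
    }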
void unallocated_encoding(DisasContext *s)
{
/* Unallocated and reserved encodings are uncategorized */
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
default_exception_el(s));
}
* Dn, Sn, Hn or Bn).
* (Note that this is not the same mapping as for A32; see cpu.h)
*/
-static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size)
+static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
{
return vec_reg_offset(s, regno, 0, size);
}
vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}
+/* Expand a 4-operand AdvSIMD vector operation using an expander function. */
+static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
+ int rx, GVecGen4Fn *gvec_fn, int vece)
+{
+ gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
+ is_q ? 16 : 8, vec_full_reg_size(s));
+}
+
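gen_gvec_fn4 mirrors the existing two- and three-operand expanders above it; its first user is the bitwise-select group later in this patch, e.g.:

    /* BSL: rd is both selector and destination; take rn bits where
     * rd is 1 and rm bits where rd is 0.
     */
    gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);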
/* Expand a 2-operand + immediate AdvSIMD vector operation using
* an op descriptor.
*/
bool iss_valid, unsigned int iss_srt,
bool iss_sf, bool iss_ar)
{
- TCGMemOp memop = s->be_data + size;
+ MemOp memop = s->be_data + size;
g_assert(size <= 3);
TCGv_i64 tmphi;
if (size < 4) {
- TCGMemOp memop = s->be_data + size;
+ MemOp memop = s->be_data + size;
tmphi = tcg_const_i64(0);
tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
} else {
/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
- int element, TCGMemOp memop)
+ int element, MemOp memop)
{
int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
switch (memop) {
}
static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
- int element, TCGMemOp memop)
+ int element, MemOp memop)
{
int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
switch (memop) {
/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
- int element, TCGMemOp memop)
+ int element, MemOp memop)
{
int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
switch (memop) {
}
static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
- int destidx, int element, TCGMemOp memop)
+ int destidx, int element, MemOp memop)
{
int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
switch (memop) {
/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
- TCGv_i64 tcg_addr, int size, TCGMemOp endian)
+ TCGv_i64 tcg_addr, int size, MemOp endian)
{
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
- TCGv_i64 tcg_addr, int size, TCGMemOp endian)
+ TCGv_i64 tcg_addr, int size, MemOp endian)
{
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
return true;
}
- gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
- s->fp_excp_el);
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
return false;
}
bool sve_access_check(DisasContext *s)
{
if (s->sve_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF, syn_sve_access_trap(),
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_sve_access_trap(),
s->sve_excp_el);
return false;
}
*/
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
- uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;
+ uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;
if (insn & (1U << 31)) {
/* BL Branch with link */
- tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
}
/* B Branch / BL Branch with link */
sf = extract32(insn, 31, 1);
op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
rt = extract32(insn, 0, 5);
- addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
+ addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
tcg_cmp = read_cpu_reg(s, rt, sf);
label_match = gen_new_label();
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
tcg_cmp, 0, label_match);
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
gen_set_label(label_match);
gen_goto_tb(s, 1, addr);
}
bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
- addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
+ addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
rt = extract32(insn, 0, 5);
tcg_cmp = tcg_temp_new_i64();
tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
tcg_cmp, 0, label_match);
tcg_temp_free_i64(tcg_cmp);
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
gen_set_label(label_match);
gen_goto_tb(s, 1, addr);
}
unallocated_encoding(s);
return;
}
- addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
+ addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
cond = extract32(insn, 0, 4);
reset_btype(s);
/* genuinely conditional branches */
TCGLabel *label_match = gen_new_label();
arm_gen_test_cc(cond, label_match);
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
gen_set_label(label_match);
gen_goto_tb(s, 1, addr);
} else {
* any pending interrupts immediately.
*/
reset_btype(s);
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
return;
case 7: /* SB */
* MB and end the TB instead.
*/
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
- gen_goto_tb(s, 0, s->pc);
+ gen_goto_tb(s, 0, s->base.pc_next);
return;
default:
TCGv_i32 tcg_syn, tcg_isread;
uint32_t syndrome;
- gen_a64_set_pc_im(s->pc - 4);
+ gen_a64_set_pc_im(s->pc_curr);
tmpptr = tcg_const_ptr(ri);
syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
tcg_syn = tcg_const_i32(syndrome);
tcg_temp_free_ptr(tmpptr);
tcg_temp_free_i32(tcg_syn);
tcg_temp_free_i32(tcg_isread);
+ } else if (ri->type & ARM_CP_RAISES_EXC) {
+ /*
+ * The readfn or writefn might raise an exception;
+ * synchronize the CPU state in case it does.
+ */
+ gen_a64_set_pc_im(s->pc_curr);
}
/* Handle special cases first */
if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
/* I/O operations must end the TB here (whether read or write) */
- gen_io_end();
s->base.is_jmp = DISAS_UPDATE;
- } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
- /* We default to ending the TB on a coprocessor register write,
+ }
+ if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
+ /*
+ * A write to any coprocessor register that ends a TB
+ * must rebuild the hflags for the next TB.
+ */
+ TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
+ gen_helper_rebuild_hflags_a64(cpu_env, tcg_el);
+ tcg_temp_free_i32(tcg_el);
+ /*
+ * We default to ending the TB on a coprocessor register write,
* but allow this to be suppressed by the register definition
* (usually only necessary to work around guest bugs).
*/
switch (op2_ll) {
case 1: /* SVC */
gen_ss_advance(s);
- gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
- default_exception_el(s));
+ gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
+ syn_aa64_svc(imm16), default_exception_el(s));
break;
case 2: /* HVC */
if (s->current_el == 0) {
/* The pre HVC helper handles cases when HVC gets trapped
* as an undefined insn by runtime configuration.
*/
- gen_a64_set_pc_im(s->pc - 4);
+ gen_a64_set_pc_im(s->pc_curr);
gen_helper_pre_hvc(cpu_env);
gen_ss_advance(s);
- gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
+ gen_exception_insn(s, s->base.pc_next, EXCP_HVC,
+ syn_aa64_hvc(imm16), 2);
break;
case 3: /* SMC */
if (s->current_el == 0) {
unallocated_encoding(s);
break;
}
- gen_a64_set_pc_im(s->pc - 4);
+ gen_a64_set_pc_im(s->pc_curr);
tmp = tcg_const_i32(syn_aa64_smc(imm16));
gen_helper_pre_smc(cpu_env, tmp);
tcg_temp_free_i32(tmp);
gen_ss_advance(s);
- gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
+ gen_exception_insn(s, s->base.pc_next, EXCP_SMC,
+ syn_aa64_smc(imm16), 3);
break;
default:
unallocated_encoding(s);
break;
}
/* BRK */
- gen_exception_bkpt_insn(s, 4, syn_aa64_bkpt(imm16));
+ gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
break;
case 2:
if (op2_ll != 0) {
break;
}
#endif
- gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
+ gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
} else {
unsupported_encoding(s, insn);
}
gen_a64_set_pc(s, dst);
/* BLR also needs to load return address */
if (opc == 1) {
- tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
}
break;
gen_a64_set_pc(s, dst);
/* BLRAA also needs to load return address */
if (opc == 9) {
- tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
}
break;
gen_helper_exception_return(cpu_env, dst);
tcg_temp_free_i64(dst);
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
/* Must exit loop to check un-masked IRQs */
s->base.is_jmp = DISAS_EXIT;
return;
TCGv_i64 addr, int size, bool is_pair)
{
int idx = get_mem_index(s);
- TCGMemOp memop = s->be_data;
+ MemOp memop = s->be_data;
g_assert(size <= 3);
if (is_pair) {
tcg_rt = cpu_reg(s, rt);
- clean_addr = tcg_const_i64((s->pc - 4) + imm);
+ clean_addr = tcg_const_i64(s->pc_curr + imm);
if (is_vector) {
do_fp_ld(s, rt, clean_addr, size);
} else {
bool is_postidx = extract32(insn, 23, 1);
bool is_q = extract32(insn, 30, 1);
TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
- TCGMemOp endian = s->be_data;
+ MemOp endian = s->be_data;
int ebytes; /* bytes per element */
int elements; /* elements per vector */
offset = sextract64(insn, 5, 19);
offset = offset << 2 | extract32(insn, 29, 2);
rd = extract32(insn, 0, 5);
- base = s->pc - 4;
+ base = s->pc_curr;
if (page) {
/* ADRP (page based) */
tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
return;
}
- /* opc == 1, BXFIL fall through to deposit */
- tcg_gen_extract_i64(tcg_tmp, tcg_tmp, ri, len);
+ /* opc == 1, BFXIL fall through to deposit */
+ tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
pos = 0;
} else {
/* Handle the ri > si case with a deposit
len = ri;
}
- if (opc == 1) { /* BFM, BXFIL */
+ if (opc == 1) { /* BFM, BFXIL */
tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
} else {
/* SBFM or UBFM: We start with zero, and we haven't modified
} else {
tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
}
- } else if (rm == rn) { /* ROR */
+ } else {
tcg_rm = cpu_reg(s, rm);
+ tcg_rn = cpu_reg(s, rn);
+
if (sf) {
- tcg_gen_rotri_i64(tcg_rd, tcg_rm, imm);
+ /* Specialization to ROR happens in EXTRACT2. */
+ tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, imm);
} else {
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(tmp, tcg_rm);
- tcg_gen_rotri_i32(tmp, tmp, imm);
- tcg_gen_extu_i32_i64(tcg_rd, tmp);
- tcg_temp_free_i32(tmp);
- }
- } else {
- tcg_rm = read_cpu_reg(s, rm, sf);
- tcg_rn = read_cpu_reg(s, rn, sf);
- tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
- tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
- tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
- if (!sf) {
- tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ TCGv_i32 t0 = tcg_temp_new_i32();
+
+ tcg_gen_extrl_i64_i32(t0, tcg_rm);
+ if (rm == rn) {
+ tcg_gen_rotri_i32(t0, t0, imm);
+ } else {
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t1, tcg_rn);
+ tcg_gen_extract2_i32(t0, t0, t1, imm);
+ tcg_temp_free_i32(t1);
+ }
+ tcg_gen_extu_i32_i64(tcg_rd, t0);
+ tcg_temp_free_i32(t0);
}
}
}
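tcg_gen_extract2_*(rd, al, ah, ofs) produces the low word of the concatenation ah:al shifted right by ofs, which is exactly EXTR; with both inputs equal it degenerates to a rotate, which the expander specializes as noted above. A standalone arithmetic check of those identities (plain C reference, not QEMU code; extract2_64 is a local helper for illustration):

    #include <assert.h>
    #include <stdint.h>

    /* Reference semantics of extract2 for 0 < ofs < 64. */
    static uint64_t extract2_64(uint64_t al, uint64_t ah, unsigned ofs)
    {
        return (al >> ofs) | (ah << (64 - ofs));
    }

    int main(void)
    {
        uint64_t rn = 0x1122334455667788ull, rm = 0x99aabbccddeeff00ull;

        /* EXTR Xd, Xn, Xm, #8: bits [71:8] of the 128-bit Xn:Xm. */
        assert(extract2_64(rm, rn, 8) == 0x8899aabbccddeeffull);
        /* EXTR with Xn == Xm is ROR by the immediate. */
        assert(extract2_64(rn, rn, 8) == ((rn >> 8) | (rn << 56)));
        return 0;
    }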
unsigned int mos, type, rm, cond, rn, rd;
TCGv_i64 t_true, t_false, t_zero;
DisasCompare64 c;
- TCGMemOp sz;
+ MemOp sz;
mos = extract32(insn, 29, 3);
type = extract32(insn, 22, 2);
}
}
-/* The imm8 encodes the sign bit, enough bits to represent an exponent in
- * the range 01....1xx to 10....0xx, and the most significant 4 bits of
- * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
- */
-uint64_t vfp_expand_imm(int size, uint8_t imm8)
-{
- uint64_t imm;
-
- switch (size) {
- case MO_64:
- imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
- (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
- extract32(imm8, 0, 6);
- imm <<= 48;
- break;
- case MO_32:
- imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
- (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
- (extract32(imm8, 0, 6) << 3);
- imm <<= 16;
- break;
- case MO_16:
- imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
- (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
- (extract32(imm8, 0, 6) << 6);
- break;
- default:
- g_assert_not_reached();
- }
- return imm;
-}
-
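The deleted helper's body is unchanged by this series; the removal reads as a move to a shared location, since FP immediates below still need expanding. The encoding deserves one worked case: for single precision, imm8 = 0x70 (sign 0, imm8<6> set, low bits 0b110000) expands to (0x3e00 | (0x30 << 3)) << 16 = 0x3f800000, i.e. 1.0f. A standalone check of that arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t imm8 = 0x70;   /* encodes 1.0 */
        uint32_t imm = ((imm8 >> 7) ? 0x8000 : 0)
                     | (((imm8 >> 6) & 1) ? 0x3e00 : 0x4000)
                     | ((imm8 & 0x3f) << 3);

        imm <<= 16;
        assert(imm == 0x3f800000u);
        return 0;
    }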
/* Floating point immediate
* 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
* +---+---+---+-----------+------+---+------------+-------+------+------+
int mos = extract32(insn, 29, 3);
uint64_t imm;
TCGv_i64 tcg_res;
- TCGMemOp sz;
+ MemOp sz;
if (mos || imm5) {
unallocated_encoding(s);
{
if (esize == size) {
int element;
- TCGMemOp msize = esize == 16 ? MO_16 : MO_32;
+ MemOp msize = esize == 16 ? MO_16 : MO_32;
TCGv_i32 tcg_elem;
/* We should have one register left here */
int shift = (2 * esize) - immhb;
int elements = is_scalar ? 1 : (64 / esize);
bool round = extract32(opcode, 0, 1);
- TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
+ MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
TCGv_i64 tcg_rn, tcg_rd, tcg_round;
TCGv_i32 tcg_rd_narrowed;
TCGv_i64 tcg_final;
}
};
NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
- TCGMemOp memop = scalar ? size : MO_32;
+ MemOp memop = scalar ? size : MO_32;
int maxpass = scalar ? 1 : is_q ? 4 : 2;
for (pass = 0; pass < maxpass; pass++) {
TCGv_ptr tcg_fpst = get_fpstatus_ptr(size == MO_16);
TCGv_i32 tcg_shift = NULL;
- TCGMemOp mop = size | (is_signed ? MO_SIGN : 0);
+ MemOp mop = size | (is_signed ? MO_SIGN : 0);
int pass;
if (fracbits || size == MO_64) {
if (u) {
tcg_gen_neg_i64(tcg_rd, tcg_rn);
} else {
- TCGv_i64 tcg_zero = tcg_const_i64(0);
- tcg_gen_neg_i64(tcg_rd, tcg_rn);
- tcg_gen_movcond_i64(TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero,
- tcg_rn, tcg_rd);
- tcg_temp_free_i64(tcg_zero);
+ tcg_gen_abs_i64(tcg_rd, tcg_rn);
}
break;
case 0x2f: /* FABS */
int dsize = is_q ? 128 : 64;
int esize = 8 << size;
int elements = dsize/esize;
- TCGMemOp memop = size | (is_u ? 0 : MO_SIGN);
+ MemOp memop = size | (is_u ? 0 : MO_SIGN);
TCGv_i64 tcg_rn = new_tmp_a64(s);
TCGv_i64 tcg_rd = new_tmp_a64(s);
TCGv_i64 tcg_round;
TCGv_i64 tcg_op1 = tcg_temp_new_i64();
TCGv_i64 tcg_op2 = tcg_temp_new_i64();
TCGv_i64 tcg_passres;
- TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
+ MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
int elt = pass + is_q * 2;
return;
case 5: /* BSL bitwise select */
- gen_gvec_op3(s, is_q, rd, rn, rm, &bsl_op);
+ gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
return;
case 6: /* BIT, bitwise insert if true */
- gen_gvec_op3(s, is_q, rd, rn, rm, &bit_op);
+ gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
return;
case 7: /* BIF, bitwise insert if false */
- gen_gvec_op3(s, is_q, rd, rn, rm, &bif_op);
+ gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
return;
default:
break;
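All three selects are one operation, d = (b & a) | (c & ~a) in tcg_gen_gvec_bitsel's (a, b, c) order with a as the bit selector; the three argument orders above just permute which register plays selector, taken-if-one and taken-if-zero. A plain-C reference of the semantics (test values are illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* d = (b & a) | (c & ~a): take b where a is 1, c where a is 0.
     * BSL rd,rn,rm: a=rd, b=rn, c=rm (destination is the selector).
     * BIT rd,rn,rm: a=rm, b=rn, c=rd (insert rn where rm is set).
     * BIF rd,rn,rm: a=rm, b=rd, c=rn (insert rn where rm is clear).
     */
    static uint64_t bitsel(uint64_t a, uint64_t b, uint64_t c)
    {
        return (b & a) | (c & ~a);
    }

    int main(void)
    {
        uint64_t sel = 0x00ff00ff00ff00ffull;
        uint64_t t = 0x1111111111111111ull;
        uint64_t f = 0x2222222222222222ull;

        assert(bitsel(sel, t, f) == 0x2211221122112211ull);
        return 0;
    }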
default:
fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n",
- __func__, insn, fpopcode, s->pc);
+ __func__, insn, fpopcode, s->pc_curr);
g_assert_not_reached();
}
if (size == 2) {
/* 32 + 32 -> 64 op */
- TCGMemOp memop = size + (u ? 0 : MO_SIGN);
+ MemOp memop = size + (u ? 0 : MO_SIGN);
for (pass = 0; pass < maxpass; pass++) {
TCGv_i64 tcg_op1 = tcg_temp_new_i64();
}
break;
case 0xb:
- if (u) { /* NEG */
+ if (u) { /* ABS, NEG */
gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
- return;
+ } else {
+ gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
}
- break;
+ return;
}
if (size == 3) {
gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
}
break;
- case 0xb: /* ABS, NEG */
- if (u) {
- tcg_gen_neg_i32(tcg_res, tcg_op);
- } else {
- TCGv_i32 tcg_zero = tcg_const_i32(0);
- tcg_gen_neg_i32(tcg_res, tcg_op);
- tcg_gen_movcond_i32(TCG_COND_GT, tcg_res, tcg_op,
- tcg_zero, tcg_op, tcg_res);
- tcg_temp_free_i32(tcg_zero);
- }
- break;
case 0x2f: /* FABS */
gen_helper_vfp_abss(tcg_res, tcg_op);
break;
tcg_temp_free_i32(tcg_zero);
break;
}
- case 0xb: /* ABS, NEG */
- if (u) {
- TCGv_i32 tcg_zero = tcg_const_i32(0);
- if (size) {
- gen_helper_neon_sub_u16(tcg_res, tcg_zero, tcg_op);
- } else {
- gen_helper_neon_sub_u8(tcg_res, tcg_zero, tcg_op);
- }
- tcg_temp_free_i32(tcg_zero);
- } else {
- if (size) {
- gen_helper_neon_abs_s16(tcg_res, tcg_op);
- } else {
- gen_helper_neon_abs_s8(tcg_res, tcg_op);
- }
- }
- break;
case 0x4: /* CLS, CLZ */
if (u) {
if (size == 0) {
switch (is_fp) {
case 1: /* normal fp */
- /* convert insn encoded size to TCGMemOp size */
+ /* convert insn encoded size to MemOp size */
switch (size) {
case 0: /* half-precision */
size = MO_16;
return;
}
- /* Given TCGMemOp size, adjust register and indexing. */
+ /* Given MemOp size, adjust register and indexing. */
switch (size) {
case MO_16:
index = h << 2 | l << 1 | m;
TCGv_i64 tcg_res[2];
int pass;
bool satop = extract32(opcode, 0, 1);
- TCGMemOp memop = MO_32;
+ MemOp memop = MO_32;
if (satop || !u) {
memop |= MO_SIGN;
* table entry even for that case.
*/
return (tlb_hit(entry->addr_code, addr) &&
- env->iotlb[mmu_idx][index].attrs.target_tlb_bit0);
+ env_tlb(env)->d[mmu_idx].iotlb[index].attrs.target_tlb_bit0);
#endif
}
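Only the access path changes here: the softmmu TLB arrays moved out of the target CPU state into a common block reached at a negative offset from env, with env_tlb() as the accessor (env_archcpu() further down is the same refactor's accessor for the containing ARMCPU). A sketch of the accessor this hunk relies on, assuming the definition lives in the common headers rather than this patch:

    /* The TLB now lives in CPUNegativeOffsetState, just before env. */
    static inline CPUTLB *env_tlb(CPUArchState *env)
    {
        return &env_neg(env)->tlb;
    }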
{
uint32_t insn;
- insn = arm_ldl_code(env, s->pc, s->sctlr_b);
+ s->pc_curr = s->base.pc_next;
+ insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b);
s->insn = insn;
- s->pc += 4;
+ s->base.pc_next += 4;
s->fp_access_checked = false;
if (s->btype != 0
&& s->guarded_page
&& !btype_destination_ok(insn, s->bt, s->btype)) {
- gen_exception_insn(s, 4, EXCP_UDEF, syn_btitrap(s->btype),
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_btitrap(s->btype),
default_exception_el(s));
return;
}
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
CPUARMState *env = cpu->env_ptr;
- ARMCPU *arm_cpu = arm_env_get_cpu(env);
+ ARMCPU *arm_cpu = env_archcpu(env);
uint32_t tb_flags = dc->base.tb->flags;
int bound, core_mmu_idx;
dc->isar = &arm_cpu->isar;
- dc->pc = dc->base.pc_first;
dc->condjmp = 0;
dc->aarch64 = 1;
dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
dc->is_ldex = false;
- dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);
+ dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
/* Bound the number of insns to execute to those left on the page. */
bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- tcg_gen_insn_start(dc->pc, 0, 0);
+ tcg_gen_insn_start(dc->base.pc_next, 0, 0);
dc->insn_start = tcg_last_op();
}
DisasContext *dc = container_of(dcbase, DisasContext, base);
if (bp->flags & BP_CPU) {
- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
gen_helper_check_breakpoints(cpu_env);
/* End the TB early; it likely won't be executed */
dc->base.is_jmp = DISAS_TOO_MANY;
} else {
- gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
+ gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
/* The address covered by the breakpoint must be
included in [tb->pc, tb->pc + tb->size) in order
for it to be properly cleared -- thus we
increment the PC here so that the logic setting
tb->size below does the right thing. */
- dc->pc += 4;
+ dc->base.pc_next += 4;
dc->base.is_jmp = DISAS_NORETURN;
}
* bits should be zero.
*/
assert(dc->base.num_insns == 1);
- gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
- default_exception_el(dc));
+ gen_swstep_exception(dc, 0, 0);
dc->base.is_jmp = DISAS_NORETURN;
} else {
disas_a64_insn(env, dc);
}
- dc->base.pc_next = dc->pc;
translator_loop_temp_check(&dc->base);
}
*/
switch (dc->base.is_jmp) {
default:
- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
/* fall through */
case DISAS_EXIT:
case DISAS_JUMP:
switch (dc->base.is_jmp) {
case DISAS_NEXT:
case DISAS_TOO_MANY:
- gen_goto_tb(dc, 1, dc->pc);
+ gen_goto_tb(dc, 1, dc->base.pc_next);
break;
default:
case DISAS_UPDATE:
- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
/* fall through */
case DISAS_EXIT:
tcg_gen_exit_tb(NULL, 0);
case DISAS_SWI:
break;
case DISAS_WFE:
- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
gen_helper_wfe(cpu_env);
break;
case DISAS_YIELD:
- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
gen_helper_yield(cpu_env);
break;
case DISAS_WFI:
*/
TCGv_i32 tmp = tcg_const_i32(4);
- gen_a64_set_pc_im(dc->pc);
+ gen_a64_set_pc_im(dc->base.pc_next);
gen_helper_wfi(cpu_env, tmp);
tcg_temp_free_i32(tmp);
/* The helper doesn't necessarily throw an exception, but we
}
}
}
-
- /* Functions above can change dc->pc, so re-align db->pc_next */
- dc->base.pc_next = dc->pc;
}
static void aarch64_tr_disas_log(const DisasContextBase *dcbase,