#include "exec/log.h"
#include "trace-tcg.h"
+#include "translate-a64.h"
+#include "qemu/atomic128.h"
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;
/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;
-static TCGv_i64 cpu_reg(DisasContext *s, int reg);
static const char *regnames[] = {
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, TCGMemOp);
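/* AtomicThreeOpFn matches the tcg_gen_atomic_*_i64 expanders:
 * (result, address, operand, mem_idx, memop).
 */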
-/* Note that the gvec expanders operate on offsets + sizes. */
-typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
-typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
- uint32_t, uint32_t);
-typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
- uint32_t, uint32_t, uint32_t);
-
/* initialize TCG globals. */
void a64_translate_init(void)
{
int el = arm_current_el(env);
const char *ns_status;
- cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n",
- env->pc, env->xregs[31]);
- for (i = 0; i < 31; i++) {
- cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
- if ((i % 4) == 3) {
- cpu_fprintf(f, "\n");
+ cpu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
+ for (i = 0; i < 32; i++) {
+ if (i == 31) {
+ cpu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
} else {
- cpu_fprintf(f, " ");
+ cpu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
+ (i + 2) % 3 ? " " : "\n");
}
}
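    /* The "(i + 2) % 3" test packs three columns per line, giving output
     * of roughly this shape (values invented for illustration):
     *
     *  PC=0000000000400580 X00=0000000000000001 X01=0000004000801e38
     * X02=0000000000000000 X03=0000000000400640 X04=0000000000000000
     *   ...
     * X29=0000004000801e30 X30=0000000000403dcc  SP=0000004000801df0
     */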
} else {
ns_status = "";
}
-
- cpu_fprintf(f, "\nPSTATE=%08x %c%c%c%c %sEL%d%c\n",
+ cpu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
psr,
psr & PSTATE_N ? 'N' : '-',
psr & PSTATE_Z ? 'Z' : '-',
el,
psr & PSTATE_SP ? 'h' : 't');
- if (flags & CPU_DUMP_FPU) {
- int numvfpregs = 32;
- for (i = 0; i < numvfpregs; i++) {
+ if (!(flags & CPU_DUMP_FPU)) {
+ cpu_fprintf(f, "\n");
+ return;
+ }
+ if (fp_exception_el(env, el) != 0) {
+ cpu_fprintf(f, " FPU disabled\n");
+ return;
+ }
+ cpu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
+ vfp_get_fpcr(env), vfp_get_fpsr(env));
+
+ if (arm_feature(env, ARM_FEATURE_SVE) && sve_exception_el(env, el) == 0) {
+ int j, zcr_len = sve_zcr_len_for_el(env, el);
+
+ for (i = 0; i <= FFR_PRED_NUM; i++) {
+ bool eol;
+ if (i == FFR_PRED_NUM) {
+ cpu_fprintf(f, "FFR=");
+ /* It's last, so end the line. */
+ eol = true;
+ } else {
+ cpu_fprintf(f, "P%02d=", i);
+ switch (zcr_len) {
+ case 0:
+ eol = i % 8 == 7;
+ break;
+ case 1:
+ eol = i % 6 == 5;
+ break;
+ case 2:
+ case 3:
+ eol = i % 3 == 2;
+ break;
+ default:
+ /* More than one quadword per predicate. */
+ eol = true;
+ break;
+ }
+ }
+ for (j = zcr_len / 4; j >= 0; j--) {
+ int digits;
+ if (j * 4 + 4 <= zcr_len + 1) {
+ digits = 16;
+ } else {
+ digits = (zcr_len % 4 + 1) * 4;
+ }
+ cpu_fprintf(f, "%0*" PRIx64 "%s", digits,
+ env->vfp.pregs[i].p[j],
+ j ? ":" : eol ? "\n" : " ");
+ }
+ }
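        /* Worked example for the digit math above: with zcr_len == 2 the
         * vector is 3 * 128 = 384 bits, so a predicate is 384 / 8 = 48
         * bits and one 64-bit word suffices.  The loop runs once with
         * j == 0, where j * 4 + 4 == 4 is not <= zcr_len + 1 == 3, so
         * digits = (2 % 4 + 1) * 4 = 12 hex digits, i.e. exactly 48 bits.
         * With zcr_len == 7 (1024-bit vectors, 128-bit predicates) both
         * words are printed with the full 16 digits.
         */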
+
+ for (i = 0; i < 32; i++) {
+ if (zcr_len == 0) {
+ cpu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
+ i, env->vfp.zregs[i].d[1],
+ env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
+ } else if (zcr_len == 1) {
+ cpu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64
+ ":%016" PRIx64 ":%016" PRIx64 "\n",
+ i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2],
+ env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]);
+ } else {
+ for (j = zcr_len; j >= 0; j--) {
+ bool odd = (zcr_len - j) % 2 != 0;
+ if (j == zcr_len) {
+ cpu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1);
+ } else if (!odd) {
+ if (j > 0) {
+ cpu_fprintf(f, " [%x-%x]=", j, j - 1);
+ } else {
+ cpu_fprintf(f, " [%x]=", j);
+ }
+ }
+ cpu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
+ env->vfp.zregs[i].d[j * 2 + 1],
+ env->vfp.zregs[i].d[j * 2],
+ odd || j == 0 ? "\n" : ":");
+ }
+ }
+ }
+ } else {
+ for (i = 0; i < 32; i++) {
uint64_t *q = aa64_vfp_qreg(env, i);
- uint64_t vlo = q[0];
- uint64_t vhi = q[1];
- cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 "%c",
- i, vhi, vlo, (i & 1 ? '\n' : ' '));
+ cpu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
+ i, q[1], q[0], (i & 1 ? "\n" : " "));
}
- cpu_fprintf(f, "FPCR: %08x FPSR: %08x\n",
- vfp_get_fpcr(env), vfp_get_fpsr(env));
}
}
if (use_goto_tb(s, n, dest)) {
tcg_gen_goto_tb(n);
gen_a64_set_pc_im(dest);
- tcg_gen_exit_tb((intptr_t)tb + n);
+ tcg_gen_exit_tb(tb, n);
s->base.is_jmp = DISAS_NORETURN;
} else {
gen_a64_set_pc_im(dest);
}
}
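/* tcg_gen_exit_tb now takes the TranslationBlock pointer and jump-slot
 * index as separate arguments rather than a single value with the index
 * OR-ed into the low bits of the pointer.  Passing (NULL, 0), as the
 * DISAS_EXIT path later in this patch does, returns to the main loop
 * without goto_tb chaining.
 */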
-static void unallocated_encoding(DisasContext *s)
+void unallocated_encoding(DisasContext *s)
{
/* Unallocated and reserved encodings are uncategorized */
gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
default_exception_el(s));
}
-#define unsupported_encoding(s, insn) \
- do { \
- qemu_log_mask(LOG_UNIMP, \
- "%s:%d: unsupported instruction encoding 0x%08x " \
- "at pc=%016" PRIx64 "\n", \
- __FILE__, __LINE__, insn, s->pc - 4); \
- unallocated_encoding(s); \
- } while (0)
-
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
init_tmp_a64_array(s);
}
-static TCGv_i64 new_tmp_a64(DisasContext *s)
+TCGv_i64 new_tmp_a64(DisasContext *s)
{
assert(s->tmp_a64_count < TMP_A64_MAX);
return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}
-static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
+TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
TCGv_i64 t = new_tmp_a64(s);
tcg_gen_movi_i64(t, 0);
* to cpu_X[31] and ZR accesses to a temporary which can be discarded.
* This is the point of the _sp forms.
*/
-static TCGv_i64 cpu_reg(DisasContext *s, int reg)
+TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
if (reg == 31) {
return new_tmp_a64_zero(s);
}
/* register access for when 31 == SP */
-static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
+TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
return cpu_X[reg];
}
* representing the register contents. This TCGv is an auto-freed
* temporary so it need not be explicitly freed, and may be modified.
*/
-static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
+TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
TCGv_i64 v = new_tmp_a64(s);
if (reg != 31) {
return v;
}
-static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
+TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
TCGv_i64 v = new_tmp_a64(s);
if (sf) {
return v;
}
-/* We should have at some point before trying to access an FP register
- * done the necessary access check, so assert that
- * (a) we did the check and
- * (b) we didn't then just plough ahead anyway if it failed.
- * Print the instruction pattern in the abort message so we can figure
- * out what we need to fix if a user encounters this problem in the wild.
- */
-static inline void assert_fp_access_checked(DisasContext *s)
-{
-#ifdef CONFIG_DEBUG_TCG
- if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
- fprintf(stderr, "target-arm: FP access check missing for "
- "instruction 0x%08x\n", s->insn);
- abort();
- }
-#endif
-}
-
-/* Return the offset into CPUARMState of an element of specified
- * size, 'element' places in from the least significant end of
- * the FP/vector register Qn.
- */
-static inline int vec_reg_offset(DisasContext *s, int regno,
- int element, TCGMemOp size)
-{
- int offs = 0;
-#ifdef HOST_WORDS_BIGENDIAN
- /* This is complicated slightly because vfp.zregs[n].d[0] is
- * still the low half and vfp.zregs[n].d[1] the high half
- * of the 128 bit vector, even on big endian systems.
- * Calculate the offset assuming a fully bigendian 128 bits,
- * then XOR to account for the order of the two 64 bit halves.
- */
- offs += (16 - ((element + 1) * (1 << size)));
- offs ^= 8;
-#else
- offs += element * (1 << size);
-#endif
- offs += offsetof(CPUARMState, vfp.zregs[regno]);
- assert_fp_access_checked(s);
- return offs;
-}
-
-/* Return the offset into CPUARMState of the "whole" vector register Qn. */
-static inline int vec_full_reg_offset(DisasContext *s, int regno)
-{
- assert_fp_access_checked(s);
- return offsetof(CPUARMState, vfp.zregs[regno]);
-}
-
-/* Return a newly allocated pointer to the vector register. */
-static TCGv_ptr vec_full_reg_ptr(DisasContext *s, int regno)
-{
- TCGv_ptr ret = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(ret, cpu_env, vec_full_reg_offset(s, regno));
- return ret;
-}
-
-/* Return the byte size of the "whole" vector register, VL / 8. */
-static inline int vec_full_reg_size(DisasContext *s)
-{
- /* FIXME SVE: We should put the composite ZCR_EL* value into tb->flags.
- In the meantime this is just the AdvSIMD length of 128. */
- return 128 / 8;
-}
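/* The vector-offset helpers above move into translate-a64.h (included at
 * the top of this patch), presumably so the SVE decoder can share them.
 * A worked example of the big-endian XOR trick in vec_reg_offset:
 * element 1 of a 32-bit lane lives in bits 63:32, i.e. in d[0].  The
 * fully-big-endian offset is 16 - (1 + 1) * 4 = 8; XOR-ing with 8 gives
 * 0, which is right because a big-endian d[0] stores bits 63:32 in its
 * first four bytes, at offset 0 of the register.
 */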
-
/* Return the offset into CPUARMState of a slice (from
* the least significant end) of FP register Qn (ie
* Dn, Sn, Hn or Bn).
}
}
-static void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
+void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
unsigned ofs = fp_reg_offset(s, reg, MO_64);
tcg_temp_free_i64(tmp);
}
-static TCGv_ptr get_fpstatus_ptr(bool is_f16)
+TCGv_ptr get_fpstatus_ptr(bool is_f16)
{
TCGv_ptr statusptr = tcg_temp_new_ptr();
int offset;
vec_full_reg_size(s), gvec_op);
}
+/* Expand a 3-operand operation using an out-of-line helper. */
+static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
+ int rn, int rm, int data, gen_helper_gvec_3 *fn)
+{
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
+}
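/* A typical call, as in the SDOT/UDOT dispatch later in this patch
 * (a usage sketch, not new behaviour):
 *
 *     gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_sdot_b);
 *
 * tcg_gen_gvec_3_ool receives the three register offsets, the operating
 * size (8 or 16 bytes), the maximum size to clear (vec_full_reg_size),
 * and 'data', which the helper recovers with simd_data(desc).
 */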
+
/* Expand a 3-operand + env pointer operation using
* an out-of-line helper.
*/
/* Check that SVE access is enabled. If it is, return true.
* If not, emit code to generate an appropriate exception and return false.
*/
-static inline bool sve_access_check(DisasContext *s)
+bool sve_access_check(DisasContext *s)
{
if (s->sve_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF, syn_sve_access_trap(),
s->sve_excp_el);
return false;
}
- return true;
+ return fp_access_check(s);
}
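/* Folding fp_access_check into the return value means SVE instructions
 * now take the FP access check as well.  A decoder sketch, assuming the
 * decodetree-style bool-returning trans functions used by disas_sve:
 *
 *     if (!sve_access_check(s)) {
 *         return true;    (exception emitted, insn consumed)
 *     }
 *     ... emit the operation ...
 *     return true;
 */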
/*
default:
break;
}
- if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
- return;
- }
if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
return;
+ } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
+ return;
}
if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
get_mem_index(s),
MO_64 | MO_ALIGN | s->be_data);
tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
- } else if (s->be_data == MO_LE) {
- if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+ } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+ if (!HAVE_CMPXCHG128) {
+ gen_helper_exit_atomic(cpu_env);
+ s->base.is_jmp = DISAS_NORETURN;
+ } else if (s->be_data == MO_LE) {
gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
cpu_exclusive_addr,
cpu_reg(s, rt),
cpu_reg(s, rt2));
} else {
- gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
- cpu_reg(s, rt), cpu_reg(s, rt2));
- }
- } else {
- if (tb_cflags(s->base.tb) & CF_PARALLEL) {
gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
cpu_exclusive_addr,
cpu_reg(s, rt),
cpu_reg(s, rt2));
- } else {
- gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
- cpu_reg(s, rt), cpu_reg(s, rt2));
}
+ } else if (s->be_data == MO_LE) {
+ gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
+ cpu_reg(s, rt), cpu_reg(s, rt2));
+ } else {
+ gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
+ cpu_reg(s, rt), cpu_reg(s, rt2));
}
} else {
tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
}
tcg_temp_free_i64(cmp);
} else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
- TCGv_i32 tcg_rs = tcg_const_i32(rs);
-
- if (s->be_data == MO_LE) {
- gen_helper_casp_le_parallel(cpu_env, tcg_rs, addr, t1, t2);
+ if (HAVE_CMPXCHG128) {
+ TCGv_i32 tcg_rs = tcg_const_i32(rs);
+ if (s->be_data == MO_LE) {
+ gen_helper_casp_le_parallel(cpu_env, tcg_rs, addr, t1, t2);
+ } else {
+ gen_helper_casp_be_parallel(cpu_env, tcg_rs, addr, t1, t2);
+ }
+ tcg_temp_free_i32(tcg_rs);
} else {
- gen_helper_casp_be_parallel(cpu_env, tcg_rs, addr, t1, t2);
+ gen_helper_exit_atomic(cpu_env);
+ s->base.is_jmp = DISAS_NORETURN;
}
- tcg_temp_free_i32(tcg_rs);
} else {
TCGv_i64 d1 = tcg_temp_new_i64();
TCGv_i64 d2 = tcg_temp_new_i64();
}
if (rt2 == 31
&& ((rt | rs) & 1) == 0
- && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
+ && dc_isar_feature(aa64_atomics, s)) {
/* CASP / CASPL */
gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
return;
}
if (rt2 == 31
&& ((rt | rs) & 1) == 0
- && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
+ && dc_isar_feature(aa64_atomics, s)) {
/* CASPA / CASPAL */
gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
return;
case 0xb: /* CASL */
case 0xe: /* CASA */
case 0xf: /* CASAL */
- if (rt2 == 31 && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
+ if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
gen_compare_and_swap(s, rs, rt, rn, size);
return;
}
int rs = extract32(insn, 16, 5);
int rn = extract32(insn, 5, 5);
int o3_opc = extract32(insn, 12, 4);
- int feature = ARM_FEATURE_V8_ATOMICS;
TCGv_i64 tcg_rn, tcg_rs;
AtomicThreeOpFn *fn;
- if (is_vector) {
+ if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
unallocated_encoding(s);
return;
}
unallocated_encoding(s);
return;
}
- if (!arm_dc_feature(s, feature)) {
- unallocated_encoding(s);
- return;
- }
if (rn == 31) {
gen_check_sp_alignment(s);
* value (ie should cause a guest UNDEF exception), and true if they are
* valid, in which case the decoded bit pattern is written to result.
*/
-static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
- unsigned int imms, unsigned int immr)
+bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
+ unsigned int imms, unsigned int immr)
{
uint64_t mask;
unsigned e, levels, s, r;
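    /* Worked example: immn = 0, imms = 0b111100, immr = 0.  The element
     * size comes from the leading one of immn:NOT(imms) = 0b0000011,
     * so e = 2 and levels = e - 1 = 1.  Then s = imms & levels = 0,
     * meaning s + 1 = 1 set bit per 2-bit element: 0b01 replicated
     * across 64 bits gives 0x5555555555555555.  A nonzero immr would
     * rotate each element right before replication.
     */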
TCGv_i64 tcg_acc, tcg_val;
TCGv_i32 tcg_bytes;
- if (!arm_dc_feature(s, ARM_FEATURE_CRC)
+ if (!dc_isar_feature(aa64_crc32, s)
|| (sf == 1 && sz != 3)
|| (sf == 0 && sz == 3)) {
unallocated_encoding(s);
* the range 01....1xx to 10....0xx, and the most significant 4 bits of
* the mantissa; see VFPExpandImm() in the v8 ARM ARM.
*/
-static uint64_t vfp_expand_imm(int size, uint8_t imm8)
+uint64_t vfp_expand_imm(int size, uint8_t imm8)
{
uint64_t imm;
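    /* Worked example for size == MO_64: imm8 = 0x70 = 0b01110000.
     * sign = 0; bit 6 is set, so the 11-bit exponent is
     * NOT(1) : Replicate(1, 8) : imm8<5:4> = 0b01111111111 = 1023,
     * an unbiased exponent of 0.  The fraction is imm8<3:0> = 0 padded
     * with zeros, so the result is 0x3ff0000000000000, i.e. 1.0.
     */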
bool u = extract32(insn, 29, 1);
TCGv_i32 ele1, ele2, ele3;
TCGv_i64 res;
- int feature;
+ bool feature;
switch (u * 16 + opcode) {
case 0x10: /* SQRDMLAH (vector) */
unallocated_encoding(s);
return;
}
- feature = ARM_FEATURE_V8_RDM;
+ feature = dc_isar_feature(aa64_rdm, s);
break;
default:
unallocated_encoding(s);
return;
}
- if (!arm_dc_feature(s, feature)) {
+ if (!feature) {
unallocated_encoding(s);
return;
}
return;
}
if (size == 3) {
- if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
+ if (!dc_isar_feature(aa64_pmull, s)) {
unallocated_encoding(s);
return;
}
int size = extract32(insn, 22, 2);
bool u = extract32(insn, 29, 1);
bool is_q = extract32(insn, 30, 1);
- int feature, rot;
+ bool feature;
+ int rot;
switch (u * 16 + opcode) {
case 0x10: /* SQRDMLAH (vector) */
unallocated_encoding(s);
return;
}
- feature = ARM_FEATURE_V8_RDM;
+ feature = dc_isar_feature(aa64_rdm, s);
break;
- case 0x8: /* FCMLA, #0 */
- case 0x9: /* FCMLA, #90 */
- case 0xa: /* FCMLA, #180 */
- case 0xb: /* FCMLA, #270 */
- case 0xc: /* FCADD, #90 */
- case 0xe: /* FCADD, #270 */
+ case 0x02: /* SDOT (vector) */
+ case 0x12: /* UDOT (vector) */
+ if (size != MO_32) {
+ unallocated_encoding(s);
+ return;
+ }
+ feature = dc_isar_feature(aa64_dp, s);
+ break;
+ case 0x18: /* FCMLA, #0 */
+ case 0x19: /* FCMLA, #90 */
+ case 0x1a: /* FCMLA, #180 */
+ case 0x1b: /* FCMLA, #270 */
+ case 0x1c: /* FCADD, #90 */
+ case 0x1e: /* FCADD, #270 */
if (size == 0
|| (size == 1 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))
|| (size == 3 && !is_q)) {
unallocated_encoding(s);
return;
}
- feature = ARM_FEATURE_V8_FCMA;
+ feature = dc_isar_feature(aa64_fcma, s);
break;
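    /* The switch folds the U bit into the case value as u * 16 + opcode,
     * which is why FCMLA/FCADD (U == 1) appear as 0x18..0x1e here while
     * SDOT/UDOT are listed under both 0x02 and 0x12.
     */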
default:
unallocated_encoding(s);
return;
}
- if (!arm_dc_feature(s, feature)) {
+ if (!feature) {
unallocated_encoding(s);
return;
}
}
return;
+ case 0x2: /* SDOT / UDOT */
+ gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0,
+ u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
+ return;
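        /* Per 32-bit lane of the destination, SDOT/UDOT accumulate the
         * dot product of four byte elements:
         *   d[i] += n[4i]*m[4i] + n[4i+1]*m[4i+1]
         *         + n[4i+2]*m[4i+2] + n[4i+3]*m[4i+3]
         * with bytes sign-extended for SDOT and zero-extended for UDOT.
         */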
+
case 0x8: /* FCMLA, #0 */
case 0x9: /* FCMLA, #90 */
case 0xa: /* FCMLA, #180 */
break;
case 0x1d: /* SQRDMLAH */
case 0x1f: /* SQRDMLSH */
- if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
+ if (!dc_isar_feature(aa64_rdm, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x0e: /* SDOT */
+ case 0x1e: /* UDOT */
+ if (size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
unallocated_encoding(s);
return;
}
case 0x13: /* FCMLA #90 */
case 0x15: /* FCMLA #180 */
case 0x17: /* FCMLA #270 */
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
+ if (!dc_isar_feature(aa64_fcma, s)) {
unallocated_encoding(s);
return;
}
}
switch (16 * u + opcode) {
+ case 0x0e: /* SDOT */
+ case 0x1e: /* UDOT */
+ gen_gvec_op3_ool(s, is_q, rd, rn, rm, index,
+ u ? gen_helper_gvec_udot_idx_b
+ : gen_helper_gvec_sdot_idx_b);
+ return;
case 0x11: /* FCMLA #0 */
case 0x13: /* FCMLA #90 */
case 0x15: /* FCMLA #180 */
case 0x17: /* FCMLA #270 */
- tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
- vec_full_reg_offset(s, rn),
- vec_reg_offset(s, rm, index, size), fpst,
- is_q ? 16 : 8, vec_full_reg_size(s),
- extract32(insn, 13, 2), /* rot */
- size == MO_64
- ? gen_helper_gvec_fcmlas_idx
- : gen_helper_gvec_fcmlah_idx);
- tcg_temp_free_ptr(fpst);
+ {
+ int rot = extract32(insn, 13, 2);
+ int data = (index << 2) | rot;
+ tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm), fpst,
+ is_q ? 16 : 8, vec_full_reg_size(s), data,
+ size == MO_64
+ ? gen_helper_gvec_fcmlas_idx
+ : gen_helper_gvec_fcmlah_idx);
+ tcg_temp_free_ptr(fpst);
+ }
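        /* Packing rot into the low two bits of 'data' (index above them)
         * lets the helper recover the element index from the descriptor
         * itself, which is why rm is now passed as a full-register offset
         * instead of a single pre-selected element.
         */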
return;
}
TCGv_i32 tcg_decrypt;
CryptoThreeOpIntFn *genfn;
- if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
- || size != 0) {
+ if (!dc_isar_feature(aa64_aes, s) || size != 0) {
unallocated_encoding(s);
return;
}
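    /* aa64_aes and the other dc_isar_feature() tests introduced below
     * read ID_AA64ISAR0/ID_AA64ISAR1 fields cached in dc->isar, replacing
     * the coarser ARM_FEATURE_* flags so that what the guest sees in the
     * ID registers and what the decoder accepts stay in sync.
     */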
int rd = extract32(insn, 0, 5);
CryptoThreeOpFn *genfn;
TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
- int feature = ARM_FEATURE_V8_SHA256;
+ bool feature;
if (size != 0) {
unallocated_encoding(s);
case 2: /* SHA1M */
case 3: /* SHA1SU0 */
genfn = NULL;
- feature = ARM_FEATURE_V8_SHA1;
+ feature = dc_isar_feature(aa64_sha1, s);
break;
case 4: /* SHA256H */
genfn = gen_helper_crypto_sha256h;
+ feature = dc_isar_feature(aa64_sha256, s);
break;
case 5: /* SHA256H2 */
genfn = gen_helper_crypto_sha256h2;
+ feature = dc_isar_feature(aa64_sha256, s);
break;
case 6: /* SHA256SU1 */
genfn = gen_helper_crypto_sha256su1;
+ feature = dc_isar_feature(aa64_sha256, s);
break;
default:
unallocated_encoding(s);
return;
}
- if (!arm_dc_feature(s, feature)) {
+ if (!feature) {
unallocated_encoding(s);
return;
}
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
CryptoTwoOpFn *genfn;
- int feature;
+ bool feature;
TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
if (size != 0) {
switch (opcode) {
case 0: /* SHA1H */
- feature = ARM_FEATURE_V8_SHA1;
+ feature = dc_isar_feature(aa64_sha1, s);
genfn = gen_helper_crypto_sha1h;
break;
case 1: /* SHA1SU1 */
- feature = ARM_FEATURE_V8_SHA1;
+ feature = dc_isar_feature(aa64_sha1, s);
genfn = gen_helper_crypto_sha1su1;
break;
case 2: /* SHA256SU0 */
- feature = ARM_FEATURE_V8_SHA256;
+ feature = dc_isar_feature(aa64_sha256, s);
genfn = gen_helper_crypto_sha256su0;
break;
default:
return;
}
- if (!arm_dc_feature(s, feature)) {
+ if (!feature) {
unallocated_encoding(s);
return;
}
int rm = extract32(insn, 16, 5);
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
- int feature;
+ bool feature;
CryptoThreeOpFn *genfn;
if (o == 0) {
switch (opcode) {
case 0: /* SHA512H */
- feature = ARM_FEATURE_V8_SHA512;
+ feature = dc_isar_feature(aa64_sha512, s);
genfn = gen_helper_crypto_sha512h;
break;
case 1: /* SHA512H2 */
- feature = ARM_FEATURE_V8_SHA512;
+ feature = dc_isar_feature(aa64_sha512, s);
genfn = gen_helper_crypto_sha512h2;
break;
case 2: /* SHA512SU1 */
- feature = ARM_FEATURE_V8_SHA512;
+ feature = dc_isar_feature(aa64_sha512, s);
genfn = gen_helper_crypto_sha512su1;
break;
case 3: /* RAX1 */
- feature = ARM_FEATURE_V8_SHA3;
+ feature = dc_isar_feature(aa64_sha3, s);
genfn = NULL;
break;
}
} else {
switch (opcode) {
case 0: /* SM3PARTW1 */
- feature = ARM_FEATURE_V8_SM3;
+ feature = dc_isar_feature(aa64_sm3, s);
genfn = gen_helper_crypto_sm3partw1;
break;
case 1: /* SM3PARTW2 */
- feature = ARM_FEATURE_V8_SM3;
+ feature = dc_isar_feature(aa64_sm3, s);
genfn = gen_helper_crypto_sm3partw2;
break;
case 2: /* SM4EKEY */
- feature = ARM_FEATURE_V8_SM4;
+ feature = dc_isar_feature(aa64_sm4, s);
genfn = gen_helper_crypto_sm4ekey;
break;
default:
}
}
- if (!arm_dc_feature(s, feature)) {
+ if (!feature) {
unallocated_encoding(s);
return;
}
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
- int feature;
+ bool feature;
CryptoTwoOpFn *genfn;
switch (opcode) {
case 0: /* SHA512SU0 */
- feature = ARM_FEATURE_V8_SHA512;
+ feature = dc_isar_feature(aa64_sha512, s);
genfn = gen_helper_crypto_sha512su0;
break;
case 1: /* SM4E */
- feature = ARM_FEATURE_V8_SM4;
+ feature = dc_isar_feature(aa64_sm4, s);
genfn = gen_helper_crypto_sm4e;
break;
default:
return;
}
- if (!arm_dc_feature(s, feature)) {
+ if (!feature) {
unallocated_encoding(s);
return;
}
int ra = extract32(insn, 10, 5);
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
- int feature;
+ bool feature;
switch (op0) {
case 0: /* EOR3 */
case 1: /* BCAX */
- feature = ARM_FEATURE_V8_SHA3;
+ feature = dc_isar_feature(aa64_sha3, s);
break;
case 2: /* SM3SS1 */
- feature = ARM_FEATURE_V8_SM3;
+ feature = dc_isar_feature(aa64_sm3, s);
break;
default:
unallocated_encoding(s);
return;
}
- if (!arm_dc_feature(s, feature)) {
+ if (!feature) {
unallocated_encoding(s);
return;
}
TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
int pass;
- if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA3)) {
+ if (!dc_isar_feature(aa64_sha3, s)) {
unallocated_encoding(s);
return;
}
TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
TCGv_i32 tcg_imm2, tcg_opcode;
- if (!arm_dc_feature(s, ARM_FEATURE_V8_SM3)) {
+ if (!dc_isar_feature(aa64_sm3, s)) {
unallocated_encoding(s);
return;
}
s->fp_access_checked = false;
switch (extract32(insn, 25, 4)) {
- case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
+ case 0x0: case 0x1: case 0x3: /* UNALLOCATED */
unallocated_encoding(s);
break;
+ case 0x2:
+ if (!arm_dc_feature(s, ARM_FEATURE_SVE) || !disas_sve(s, insn)) {
+ unallocated_encoding(s);
+ }
+ break;
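    /* Encoding group 2 now routes to the SVE decoder; disas_sve returns
     * false for patterns it does not recognize, which lands in the same
     * UNDEF path as a missing feature.
     */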
case 0x8: case 0x9: /* Data processing - immediate */
disas_data_proc_imm(s, insn);
break;
ARMCPU *arm_cpu = arm_env_get_cpu(env);
int bound;
+ dc->isar = &arm_cpu->isar;
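    /* Cache the ISAR register block so that the dc_isar_feature() checks
     * used throughout the decoder read the ID register fields without
     * dereferencing the CPU object on every test.
     */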
dc->pc = dc->base.pc_first;
dc->condjmp = 0;
gen_a64_set_pc_im(dc->pc);
/* fall through */
case DISAS_EXIT:
- tcg_gen_exit_tb(0);
+ tcg_gen_exit_tb(NULL, 0);
break;
case DISAS_JUMP:
tcg_gen_lookup_and_goto_ptr();
/* The helper doesn't necessarily throw an exception, but we
* must go back to the main loop to check for interrupts anyway.
*/
- tcg_gen_exit_tb(0);
+ tcg_gen_exit_tb(NULL, 0);
break;
}
}