typedef void VFPGen3OpDPFn(TCGv_i64 vd,
TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);
+/*
+ * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
+ * The callback should emit code that writes its result to vd;
+ * vd is an output only and must not be read.
+ */
+typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
+typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);
+
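(For illustration only, not part of this patch: a VFPGen2OpSPFn-style callback simply emits TCG ops that compute vd from vm. The function name below is invented for the example; the conversions in this series pass existing helpers such as gen_helper_vfp_abss instead.)

static void gen_example_neg_sp(TCGv_i32 vd, TCGv_i32 vm)
{
    /* Write only vd: negate a single-precision value by flipping bit 31 */
    tcg_gen_xori_i32(vd, vm, 0x80000000);
}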
/*
* Perform a 3-operand VFP data processing instruction. fn is the
* callback to do the actual operation; this function deals with the
return true;
}
+static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
+{
+ uint32_t delta_m = 0;
+ uint32_t delta_d = 0;
+ uint32_t bank_mask = 0;
+ int veclen = s->vec_len;
+ TCGv_i32 f0, fd;
+
+ if (!dc_isar_feature(aa32_fpshvec, s) &&
+ (veclen != 0 || s->vec_stride != 0)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (veclen > 0) {
+ bank_mask = 0x18;
+
+ /* Figure out what type of vector operation this is. */
+ if ((vd & bank_mask) == 0) {
+ /* scalar */
+ veclen = 0;
+ } else {
+ delta_d = s->vec_stride + 1;
+
+ if ((vm & bank_mask) == 0) {
+ /* mixed scalar/vector */
+ delta_m = 0;
+ } else {
+ /* vector */
+ delta_m = delta_d;
+ }
+ }
+ }
+
+ f0 = tcg_temp_new_i32();
+ fd = tcg_temp_new_i32();
+
+ neon_load_reg32(f0, vm);
+
+ for (;;) {
+ fn(fd, f0);
+ neon_store_reg32(fd, vd);
+
+ if (veclen == 0) {
+ break;
+ }
+
+ if (delta_m == 0) {
+ /* single source one-many */
+ while (veclen--) {
+ /* advance vd, wrapping within its 8-register bank */
+ vd = ((vd + delta_d) & 0x7) | (vd & bank_mask);
+ neon_store_reg32(fd, vd);
+ }
+ break;
+ }
+
+ /* Set up the operands for the next iteration */
+ veclen--;
+ vd = ((vd + delta_d) & 0x7) | (vd & bank_mask);
+ vm = ((vm + delta_m) & 0x7) | (vm & bank_mask);
+ neon_load_reg32(f0, vm);
+ }
+
+ tcg_temp_free_i32(f0);
+ tcg_temp_free_i32(fd);
+
+ return true;
+}
+
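(Aside, illustrative only: the register-advance arithmetic above keeps the operands inside their single-precision bank. The banks are s0-s7, s8-s15, s16-s23 and s24-s31, so the bank bits are 0x18 and the in-bank offset wraps modulo 8. Written as a standalone helper with an invented name:)

static inline int vfp_advance_sreg_example(int reg, int delta)
{
    /* keep the bank bits, wrap the offset within the 8-register bank */
    return ((reg + delta) & 0x7) | (reg & 0x18);
}

For example, advancing s15 by one yields s8, staying inside the s8-s15 bank rather than spilling into s16-s23.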
+static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
+{
+ uint32_t delta_m = 0;
+ uint32_t delta_d = 0;
+ uint32_t bank_mask = 0;
+ int veclen = s->vec_len;
+ TCGv_i64 f0, fd;
+
+ /* UNDEF accesses to D16-D31 if they don't exist */
+ if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vm) & 0x10)) {
+ return false;
+ }
+
+ if (!dc_isar_feature(aa32_fpshvec, s) &&
+ (veclen != 0 || s->vec_stride != 0)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (veclen > 0) {
+ bank_mask = 0xc;
+
+ /* Figure out what type of vector operation this is. */
+ if ((vd & bank_mask) == 0) {
+ /* scalar */
+ veclen = 0;
+ } else {
+ delta_d = (s->vec_stride >> 1) + 1;
+
+ if ((vm & bank_mask) == 0) {
+ /* mixed scalar/vector */
+ delta_m = 0;
+ } else {
+ /* vector */
+ delta_m = delta_d;
+ }
+ }
+ }
+
+ f0 = tcg_temp_new_i64();
+ fd = tcg_temp_new_i64();
+
+ neon_load_reg64(f0, vm);
+
+ for (;;) {
+ fn(fd, f0);
+ neon_store_reg64(fd, vd);
+
+ if (veclen == 0) {
+ break;
+ }
+
+ if (delta_m == 0) {
+ /* single source one-many */
+ while (veclen--) {
+ /* advance vd, wrapping within its 4-register bank */
+ vd = ((vd + delta_d) & 0x3) | (vd & bank_mask);
+ neon_store_reg64(fd, vd);
+ }
+ break;
+ }
+
+ /* Set up the operands for the next iteration */
+ veclen--;
+ vd = ((vd + delta_d) & 0x3) | (vd & bank_mask);
+ vm = ((vm + delta_m) & 0x3) | (vm & bank_mask);
+ neon_load_reg64(f0, vm);
+ }
+
+ tcg_temp_free_i64(f0);
+ tcg_temp_free_i64(fd);
+
+ return true;
+}
+
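(The double-precision advance is analogous, again only an illustrative sketch: the banks are d0-d3, d4-d7, d8-d11 and d12-d15, so the bank bits are 0xc and the offset wraps modulo 4.)

static inline int vfp_advance_dreg_example(int reg, int delta)
{
    return ((reg + delta) & 0x3) | (reg & 0xc);
}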
static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
/* Note that order of inputs to the add matters for NaNs */
tcg_temp_free_i64(fd);
return true;
}
+
+static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a)
+{
+ return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm);
+}
+
+static bool trans_VABS_dp(DisasContext *s, arg_VABS_dp *a)
+{
+ return do_vfp_2op_dp(s, gen_helper_vfp_absd, a->vd, a->vm);
+}
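Other two-operand VFP instructions can be converted the same way. As a sketch (assuming the corresponding VNEG decodetree patterns, and hence arg_VNEG_sp/arg_VNEG_dp, exist, which this hunk does not show), the existing gen_helper_vfp_negs and gen_helper_vfp_negd helpers already match the callback signatures:

static bool trans_VNEG_sp(DisasContext *s, arg_VNEG_sp *a)
{
    return do_vfp_2op_sp(s, gen_helper_vfp_negs, a->vd, a->vm);
}

static bool trans_VNEG_dp(DisasContext *s, arg_VNEG_dp *a)
{
    return do_vfp_2op_dp(s, gen_helper_vfp_negd, a->vd, a->vm);
}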