1/*
2 * ARM translation: AArch32 Neon instructions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 * Copyright (c) 2020 Linaro, Ltd.
8 *
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 */
22
23/*
24 * This file is intended to be included from translate.c; it uses
25 * some macros and definitions provided by that file.
26 * It might be possible to convert it to a standalone .c file eventually.
27 */
28
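/*
 * Helper functions used by the generated decoder (referenced via
 * !function= in the Neon decode files) to transform encoded immediate
 * fields: plus1() adds one, and the rsub_N() functions convert an
 * encoded value x into N - x, e.g. a right-shift count.
 */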
29static inline int plus1(DisasContext *s, int x)
30{
31 return x + 1;
32}
33
34static inline int rsub_64(DisasContext *s, int x)
35{
36 return 64 - x;
37}
38
39static inline int rsub_32(DisasContext *s, int x)
40{
41 return 32 - x;
42}
43static inline int rsub_16(DisasContext *s, int x)
44{
45 return 16 - x;
46}
47static inline int rsub_8(DisasContext *s, int x)
48{
49 return 8 - x;
50}
51
52/* Include the generated Neon decoder */
53#include "decode-neon-dp.inc.c"
54#include "decode-neon-ls.inc.c"
55#include "decode-neon-shared.inc.c"
56
57static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
58{
59 int opr_sz;
60 TCGv_ptr fpst;
61 gen_helper_gvec_3_ptr *fn_gvec_ptr;
62
63 if (!dc_isar_feature(aa32_vcma, s)
64 || (!a->size && !dc_isar_feature(aa32_fp16_arith, s))) {
65 return false;
66 }
67
68 /* UNDEF accesses to D16-D31 if they don't exist. */
69 if (!dc_isar_feature(aa32_simd_r32, s) &&
70 ((a->vd | a->vn | a->vm) & 0x10)) {
71 return false;
72 }
73
74 if ((a->vn | a->vm | a->vd) & a->q) {
75 return false;
76 }
77
78 if (!vfp_access_check(s)) {
79 return true;
80 }
81
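    /* opr_sz is in bytes: 8 for a D-register op, 16 for a Q-register (q == 1) op */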
82 opr_sz = (1 + a->q) * 8;
83 fpst = get_fpstatus_ptr(1);
84 fn_gvec_ptr = a->size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
85 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
86 vfp_reg_offset(1, a->vn),
87 vfp_reg_offset(1, a->vm),
88 fpst, opr_sz, opr_sz, a->rot,
89 fn_gvec_ptr);
90 tcg_temp_free_ptr(fpst);
91 return true;
92}
93
94static bool trans_VCADD(DisasContext *s, arg_VCADD *a)
95{
96 int opr_sz;
97 TCGv_ptr fpst;
98 gen_helper_gvec_3_ptr *fn_gvec_ptr;
99
100 if (!dc_isar_feature(aa32_vcma, s)
101 || (!a->size && !dc_isar_feature(aa32_fp16_arith, s))) {
102 return false;
103 }
104
105 /* UNDEF accesses to D16-D31 if they don't exist. */
106 if (!dc_isar_feature(aa32_simd_r32, s) &&
107 ((a->vd | a->vn | a->vm) & 0x10)) {
108 return false;
109 }
110
111 if ((a->vn | a->vm | a->vd) & a->q) {
112 return false;
113 }
114
115 if (!vfp_access_check(s)) {
116 return true;
117 }
118
119 opr_sz = (1 + a->q) * 8;
120 fpst = get_fpstatus_ptr(1);
121 fn_gvec_ptr = a->size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
122 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
123 vfp_reg_offset(1, a->vn),
124 vfp_reg_offset(1, a->vm),
125 fpst, opr_sz, opr_sz, a->rot,
126 fn_gvec_ptr);
127 tcg_temp_free_ptr(fpst);
128 return true;
129}
130
131static bool trans_VDOT(DisasContext *s, arg_VDOT *a)
132{
133 int opr_sz;
134 gen_helper_gvec_3 *fn_gvec;
135
136 if (!dc_isar_feature(aa32_dp, s)) {
137 return false;
138 }
139
140 /* UNDEF accesses to D16-D31 if they don't exist. */
141 if (!dc_isar_feature(aa32_simd_r32, s) &&
142 ((a->vd | a->vn | a->vm) & 0x10)) {
143 return false;
144 }
145
146 if ((a->vn | a->vm | a->vd) & a->q) {
147 return false;
148 }
149
150 if (!vfp_access_check(s)) {
151 return true;
152 }
153
154 opr_sz = (1 + a->q) * 8;
155 fn_gvec = a->u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
156 tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
157 vfp_reg_offset(1, a->vn),
158 vfp_reg_offset(1, a->vm),
159 opr_sz, opr_sz, 0, fn_gvec);
160 return true;
161}
162
163static bool trans_VFML(DisasContext *s, arg_VFML *a)
164{
165 int opr_sz;
166
167 if (!dc_isar_feature(aa32_fhm, s)) {
168 return false;
169 }
170
171 /* UNDEF accesses to D16-D31 if they don't exist. */
172 if (!dc_isar_feature(aa32_simd_r32, s) &&
173 (a->vd & 0x10)) {
174 return false;
175 }
176
177 if (a->vd & a->q) {
178 return false;
179 }
180
181 if (!vfp_access_check(s)) {
182 return true;
183 }
184
185 opr_sz = (1 + a->q) * 8;
186 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
187 vfp_reg_offset(a->q, a->vn),
188 vfp_reg_offset(a->q, a->vm),
189 cpu_env, opr_sz, opr_sz, a->s, /* is_2 == 0 */
190 gen_helper_gvec_fmlal_a32);
191 return true;
192}
193
194static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
195{
196 gen_helper_gvec_3_ptr *fn_gvec_ptr;
197 int opr_sz;
198 TCGv_ptr fpst;
199
200 if (!dc_isar_feature(aa32_vcma, s)) {
201 return false;
202 }
203 if (a->size == 0 && !dc_isar_feature(aa32_fp16_arith, s)) {
204 return false;
205 }
206
207 /* UNDEF accesses to D16-D31 if they don't exist. */
208 if (!dc_isar_feature(aa32_simd_r32, s) &&
209 ((a->vd | a->vn | a->vm) & 0x10)) {
210 return false;
211 }
212
213 if ((a->vd | a->vn) & a->q) {
214 return false;
215 }
216
217 if (!vfp_access_check(s)) {
218 return true;
219 }
220
221 fn_gvec_ptr = (a->size ? gen_helper_gvec_fcmlas_idx
222 : gen_helper_gvec_fcmlah_idx);
223 opr_sz = (1 + a->q) * 8;
224 fpst = get_fpstatus_ptr(1);
225 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
226 vfp_reg_offset(1, a->vn),
227 vfp_reg_offset(1, a->vm),
228 fpst, opr_sz, opr_sz,
229 (a->index << 2) | a->rot, fn_gvec_ptr);
230 tcg_temp_free_ptr(fpst);
231 return true;
232}
233
234static bool trans_VDOT_scalar(DisasContext *s, arg_VDOT_scalar *a)
235{
236 gen_helper_gvec_3 *fn_gvec;
237 int opr_sz;
238 TCGv_ptr fpst;
239
240 if (!dc_isar_feature(aa32_dp, s)) {
241 return false;
242 }
243
244 /* UNDEF accesses to D16-D31 if they don't exist. */
245 if (!dc_isar_feature(aa32_simd_r32, s) &&
246 ((a->vd | a->vn) & 0x10)) {
247 return false;
248 }
249
250 if ((a->vd | a->vn) & a->q) {
251 return false;
252 }
253
254 if (!vfp_access_check(s)) {
255 return true;
256 }
257
258 fn_gvec = a->u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
259 opr_sz = (1 + a->q) * 8;
260 fpst = get_fpstatus_ptr(1);
261 tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
262 vfp_reg_offset(1, a->vn),
263 vfp_reg_offset(1, a->rm),
264 opr_sz, opr_sz, a->index, fn_gvec);
265 tcg_temp_free_ptr(fpst);
266 return true;
267}
268
269static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
270{
271 int opr_sz;
272
273 if (!dc_isar_feature(aa32_fhm, s)) {
274 return false;
275 }
276
277 /* UNDEF accesses to D16-D31 if they don't exist. */
278 if (!dc_isar_feature(aa32_simd_r32, s) &&
279 ((a->vd & 0x10) || (a->q && (a->vn & 0x10)))) {
280 return false;
281 }
282
283 if (a->vd & a->q) {
284 return false;
285 }
286
287 if (!vfp_access_check(s)) {
288 return true;
289 }
290
291 opr_sz = (1 + a->q) * 8;
292 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
293 vfp_reg_offset(a->q, a->vn),
294 vfp_reg_offset(a->q, a->rm),
295 cpu_env, opr_sz, opr_sz,
296 (a->index << 2) | a->s, /* is_2 == 0 */
297 gen_helper_gvec_fmlal_idx_a32);
298 return true;
299}
300
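/*
 * Table of {nregs, interleave, spacing} for the Neon load/store multiple
 * structures insns, indexed by the decoded itype field; itype values
 * above 10 are rejected before the table is consulted.
 */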
301static struct {
302 int nregs;
303 int interleave;
304 int spacing;
305} const neon_ls_element_type[11] = {
306 {1, 4, 1},
307 {1, 4, 2},
308 {4, 1, 1},
309 {2, 2, 2},
310 {1, 3, 1},
311 {1, 3, 2},
312 {3, 1, 1},
313 {1, 1, 1},
314 {1, 2, 1},
315 {1, 2, 2},
316 {2, 1, 1}
317};
318
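/*
 * Post-indexed addressing writeback for the Neon load/store insns:
 * rm == 15 means no writeback, rm == 13 means add the transfer size
 * (stride) to the base register, anything else adds register rm.
 */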
319static void gen_neon_ldst_base_update(DisasContext *s, int rm, int rn,
320 int stride)
321{
322 if (rm != 15) {
323 TCGv_i32 base;
324
325 base = load_reg(s, rn);
326 if (rm == 13) {
327 tcg_gen_addi_i32(base, base, stride);
328 } else {
329 TCGv_i32 index;
330 index = load_reg(s, rm);
331 tcg_gen_add_i32(base, base, index);
332 tcg_temp_free_i32(index);
333 }
334 store_reg(s, rn, base);
335 }
336}
337
338static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
339{
340 /* Neon load/store multiple structures */
341 int nregs, interleave, spacing, reg, n;
342 MemOp endian = s->be_data;
343 int mmu_idx = get_mem_index(s);
344 int size = a->size;
345 TCGv_i64 tmp64;
346 TCGv_i32 addr, tmp;
347
348 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
349 return false;
350 }
351
352 /* UNDEF accesses to D16-D31 if they don't exist */
353 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
354 return false;
355 }
356 if (a->itype > 10) {
357 return false;
358 }
359 /* Catch UNDEF cases for bad values of align field */
360 switch (a->itype & 0xc) {
361 case 4:
362 if (a->align >= 2) {
363 return false;
364 }
365 break;
366 case 8:
367 if (a->align == 3) {
368 return false;
369 }
370 break;
371 default:
372 break;
373 }
374 nregs = neon_ls_element_type[a->itype].nregs;
375 interleave = neon_ls_element_type[a->itype].interleave;
376 spacing = neon_ls_element_type[a->itype].spacing;
377 if (size == 3 && (interleave | spacing) != 1) {
378 return false;
379 }
380
381 if (!vfp_access_check(s)) {
382 return true;
383 }
384
385 /* For our purposes, bytes are always little-endian. */
386 if (size == 0) {
387 endian = MO_LE;
388 }
389 /*
390 * Consecutive little-endian elements from a single register
391 * can be promoted to a larger little-endian operation.
392 */
393 if (interleave == 1 && endian == MO_LE) {
394 size = 3;
395 }
396 tmp64 = tcg_temp_new_i64();
397 addr = tcg_temp_new_i32();
398 tmp = tcg_const_i32(1 << size);
399 load_reg_var(s, addr, a->rn);
400 for (reg = 0; reg < nregs; reg++) {
401 for (n = 0; n < 8 >> size; n++) {
402 int xs;
403 for (xs = 0; xs < interleave; xs++) {
404 int tt = a->vd + reg + spacing * xs;
405
406 if (a->l) {
407 gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
408 neon_store_element64(tt, n, size, tmp64);
409 } else {
410 neon_load_element64(tmp64, tt, n, size);
411 gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
412 }
413 tcg_gen_add_i32(addr, addr, tmp);
414 }
415 }
416 }
417 tcg_temp_free_i32(addr);
418 tcg_temp_free_i32(tmp);
419 tcg_temp_free_i64(tmp64);
420
421 gen_neon_ldst_base_update(s, a->rm, a->rn, nregs * interleave * 8);
422 return true;
423}
424
425static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
426{
427 /* Neon load single structure to all lanes */
428 int reg, stride, vec_size;
429 int vd = a->vd;
430 int size = a->size;
431 int nregs = a->n + 1;
432 TCGv_i32 addr, tmp;
433
434 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
435 return false;
436 }
437
438 /* UNDEF accesses to D16-D31 if they don't exist */
439 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
440 return false;
441 }
442
443 if (size == 3) {
444 if (nregs != 4 || a->a == 0) {
445 return false;
446 }
447 /* For VLD4 size == 3 a == 1 means 32 bits at 16 byte alignment */
448 size = 2;
449 }
450 if (nregs == 1 && a->a == 1 && size == 0) {
451 return false;
452 }
453 if (nregs == 3 && a->a == 1) {
454 return false;
455 }
456
457 if (!vfp_access_check(s)) {
458 return true;
459 }
460
461 /*
462 * VLD1 to all lanes: T bit indicates how many Dregs to write.
463 * VLD2/3/4 to all lanes: T bit indicates register stride.
464 */
465 stride = a->t ? 2 : 1;
466 vec_size = nregs == 1 ? stride * 8 : 8;
467
468 tmp = tcg_temp_new_i32();
469 addr = tcg_temp_new_i32();
470 load_reg_var(s, addr, a->rn);
471 for (reg = 0; reg < nregs; reg++) {
472 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
473 s->be_data | size);
474 if ((vd & 1) && vec_size == 16) {
475 /*
476 * We cannot write 16 bytes at once because the
477 * destination is unaligned.
478 */
479 tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
480 8, 8, tmp);
481 tcg_gen_gvec_mov(0, neon_reg_offset(vd + 1, 0),
482 neon_reg_offset(vd, 0), 8, 8);
483 } else {
484 tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
485 vec_size, vec_size, tmp);
486 }
487 tcg_gen_addi_i32(addr, addr, 1 << size);
488 vd += stride;
489 }
490 tcg_temp_free_i32(tmp);
491 tcg_temp_free_i32(addr);
492
493 gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << size) * nregs);
494
495 return true;
496}
497
498static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
499{
500 /* Neon load/store single structure to one lane */
501 int reg;
502 int nregs = a->n + 1;
503 int vd = a->vd;
504 TCGv_i32 addr, tmp;
505
506 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
507 return false;
508 }
509
510 /* UNDEF accesses to D16-D31 if they don't exist */
511 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
512 return false;
513 }
514
515 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
516 switch (nregs) {
517 case 1:
518 if (((a->align & (1 << a->size)) != 0) ||
519 (a->size == 2 && ((a->align & 3) == 1 || (a->align & 3) == 2))) {
520 return false;
521 }
522 break;
523 case 3:
524 if ((a->align & 1) != 0) {
525 return false;
526 }
527 /* fall through */
528 case 2:
529 if (a->size == 2 && (a->align & 2) != 0) {
530 return false;
531 }
532 break;
533 case 4:
534 if ((a->size == 2) && ((a->align & 3) == 3)) {
535 return false;
536 }
537 break;
538 default:
539 abort();
540 }
541 if ((vd + a->stride * (nregs - 1)) > 31) {
542 /*
543 * Attempts to write off the end of the register file are
544 * UNPREDICTABLE; we choose to UNDEF because otherwise we would
545 * access off the end of the array that holds the register data.
546 */
547 return false;
548 }
549
550 if (!vfp_access_check(s)) {
551 return true;
552 }
553
554 tmp = tcg_temp_new_i32();
555 addr = tcg_temp_new_i32();
556 load_reg_var(s, addr, a->rn);
557 /*
558 * TODO: if we implemented alignment exceptions, we should check
559 * addr against the alignment encoded in a->align here.
560 */
561 for (reg = 0; reg < nregs; reg++) {
562 if (a->l) {
563 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
564 s->be_data | a->size);
565 neon_store_element(vd, a->reg_idx, a->size, tmp);
566 } else { /* Store */
567 neon_load_element(tmp, vd, a->reg_idx, a->size);
568 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
569 s->be_data | a->size);
570 }
571 vd += a->stride;
572 tcg_gen_addi_i32(addr, addr, 1 << a->size);
573 }
574 tcg_temp_free_i32(addr);
575 tcg_temp_free_i32(tmp);
576
577 gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << a->size) * nregs);
578
579 return true;
580}
581
582static bool do_3same(DisasContext *s, arg_3same *a, GVecGen3Fn fn)
583{
584 int vec_size = a->q ? 16 : 8;
585 int rd_ofs = neon_reg_offset(a->vd, 0);
586 int rn_ofs = neon_reg_offset(a->vn, 0);
587 int rm_ofs = neon_reg_offset(a->vm, 0);
588
589 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
590 return false;
591 }
592
593 /* UNDEF accesses to D16-D31 if they don't exist. */
594 if (!dc_isar_feature(aa32_simd_r32, s) &&
595 ((a->vd | a->vn | a->vm) & 0x10)) {
596 return false;
597 }
598
599 if ((a->vn | a->vm | a->vd) & a->q) {
600 return false;
601 }
602
603 if (!vfp_access_check(s)) {
604 return true;
605 }
606
607 fn(a->size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
608 return true;
609}
610
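/*
 * The trans_<INSN>_3s() functions below are the entry points called by
 * the generated decoder for the 3-reg-same group; these macros produce
 * them from a gvec expansion function of the appropriate signature.
 */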
611#define DO_3SAME(INSN, FUNC) \
612 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
613 { \
614 return do_3same(s, a, FUNC); \
615 }
616
617DO_3SAME(VADD, tcg_gen_gvec_add)
618DO_3SAME(VSUB, tcg_gen_gvec_sub)
619DO_3SAME(VAND, tcg_gen_gvec_and)
620DO_3SAME(VBIC, tcg_gen_gvec_andc)
621DO_3SAME(VORR, tcg_gen_gvec_or)
622DO_3SAME(VORN, tcg_gen_gvec_orc)
623DO_3SAME(VEOR, tcg_gen_gvec_xor)
624DO_3SAME(VSHL_S, gen_gvec_sshl)
625DO_3SAME(VSHL_U, gen_gvec_ushl)
626DO_3SAME(VQADD_S, gen_gvec_sqadd_qc)
627DO_3SAME(VQADD_U, gen_gvec_uqadd_qc)
628DO_3SAME(VQSUB_S, gen_gvec_sqsub_qc)
629DO_3SAME(VQSUB_U, gen_gvec_uqsub_qc)
630
631/* These insns are all gvec_bitsel but with the inputs in various orders. */
632#define DO_3SAME_BITSEL(INSN, O1, O2, O3) \
633 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
634 uint32_t rn_ofs, uint32_t rm_ofs, \
635 uint32_t oprsz, uint32_t maxsz) \
636 { \
637 tcg_gen_gvec_bitsel(vece, rd_ofs, O1, O2, O3, oprsz, maxsz); \
638 } \
639 DO_3SAME(INSN, gen_##INSN##_3s)
640
641DO_3SAME_BITSEL(VBSL, rd_ofs, rn_ofs, rm_ofs)
642DO_3SAME_BITSEL(VBIT, rm_ofs, rn_ofs, rd_ofs)
643DO_3SAME_BITSEL(VBIF, rm_ofs, rd_ofs, rn_ofs)
644
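/*
 * Like DO_3SAME, but for insns which do not provide a size == 3
 * (64-bit element) form; the macro rejects that encoding.
 */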
645#define DO_3SAME_NO_SZ_3(INSN, FUNC) \
646 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
647 { \
648 if (a->size == 3) { \
649 return false; \
650 } \
651 return do_3same(s, a, FUNC); \
652 }
653
654DO_3SAME_NO_SZ_3(VMAX_S, tcg_gen_gvec_smax)
655DO_3SAME_NO_SZ_3(VMAX_U, tcg_gen_gvec_umax)
656DO_3SAME_NO_SZ_3(VMIN_S, tcg_gen_gvec_smin)
657DO_3SAME_NO_SZ_3(VMIN_U, tcg_gen_gvec_umin)
658DO_3SAME_NO_SZ_3(VMUL, tcg_gen_gvec_mul)
659DO_3SAME_NO_SZ_3(VMLA, gen_gvec_mla)
660DO_3SAME_NO_SZ_3(VMLS, gen_gvec_mls)
661DO_3SAME_NO_SZ_3(VTST, gen_gvec_cmtst)
662DO_3SAME_NO_SZ_3(VABD_S, gen_gvec_sabd)
663DO_3SAME_NO_SZ_3(VABA_S, gen_gvec_saba)
664DO_3SAME_NO_SZ_3(VABD_U, gen_gvec_uabd)
665DO_3SAME_NO_SZ_3(VABA_U, gen_gvec_uaba)
666
667#define DO_3SAME_CMP(INSN, COND) \
668 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
669 uint32_t rn_ofs, uint32_t rm_ofs, \
670 uint32_t oprsz, uint32_t maxsz) \
671 { \
672 tcg_gen_gvec_cmp(COND, vece, rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz); \
673 } \
674 DO_3SAME_NO_SZ_3(INSN, gen_##INSN##_3s)
675
676DO_3SAME_CMP(VCGT_S, TCG_COND_GT)
677DO_3SAME_CMP(VCGT_U, TCG_COND_GTU)
678DO_3SAME_CMP(VCGE_S, TCG_COND_GE)
679DO_3SAME_CMP(VCGE_U, TCG_COND_GEU)
680DO_3SAME_CMP(VCEQ, TCG_COND_EQ)
681
682#define WRAP_OOL_FN(WRAPNAME, FUNC) \
683 static void WRAPNAME(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, \
684 uint32_t rm_ofs, uint32_t oprsz, uint32_t maxsz) \
685 { \
686 tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, 0, FUNC); \
687 }
688
689WRAP_OOL_FN(gen_VMUL_p_3s, gen_helper_gvec_pmul_b)
690
691static bool trans_VMUL_p_3s(DisasContext *s, arg_3same *a)
692{
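    /* Polynomial VMUL only has the .P8 (byte element, size == 0) form */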
693 if (a->size != 0) {
694 return false;
695 }
696 return do_3same(s, a, gen_VMUL_p_3s);
697}
698
699#define DO_VQRDMLAH(INSN, FUNC) \
700 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
701 { \
702 if (!dc_isar_feature(aa32_rdm, s)) { \
703 return false; \
704 } \
705 if (a->size != 1 && a->size != 2) { \
706 return false; \
707 } \
708 return do_3same(s, a, FUNC); \
709 }
710
711DO_VQRDMLAH(VQRDMLAH, gen_gvec_sqrdmlah_qc)
712DO_VQRDMLAH(VQRDMLSH, gen_gvec_sqrdmlsh_qc)
713
714#define DO_SHA1(NAME, FUNC) \
715 WRAP_OOL_FN(gen_##NAME##_3s, FUNC) \
716 static bool trans_##NAME##_3s(DisasContext *s, arg_3same *a) \
717 { \
718 if (!dc_isar_feature(aa32_sha1, s)) { \
719 return false; \
720 } \
721 return do_3same(s, a, gen_##NAME##_3s); \
722 }
723
724DO_SHA1(SHA1C, gen_helper_crypto_sha1c)
725DO_SHA1(SHA1P, gen_helper_crypto_sha1p)
726DO_SHA1(SHA1M, gen_helper_crypto_sha1m)
727DO_SHA1(SHA1SU0, gen_helper_crypto_sha1su0)
728
729#define DO_SHA2(NAME, FUNC) \
730 WRAP_OOL_FN(gen_##NAME##_3s, FUNC) \
731 static bool trans_##NAME##_3s(DisasContext *s, arg_3same *a) \
732 { \
733 if (!dc_isar_feature(aa32_sha2, s)) { \
734 return false; \
735 } \
736 return do_3same(s, a, gen_##NAME##_3s); \
737 }
738
739DO_SHA2(SHA256H, gen_helper_crypto_sha256h)
740DO_SHA2(SHA256H2, gen_helper_crypto_sha256h2)
741DO_SHA2(SHA256SU1, gen_helper_crypto_sha256su1)
742
743#define DO_3SAME_64(INSN, FUNC) \
744 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
745 uint32_t rn_ofs, uint32_t rm_ofs, \
746 uint32_t oprsz, uint32_t maxsz) \
747 { \
748 static const GVecGen3 op = { .fni8 = FUNC }; \
749 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &op); \
750 } \
751 DO_3SAME(INSN, gen_##INSN##_3s)
752
753#define DO_3SAME_64_ENV(INSN, FUNC) \
754 static void gen_##INSN##_elt(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m) \
755 { \
756 FUNC(d, cpu_env, n, m); \
757 } \
758 DO_3SAME_64(INSN, gen_##INSN##_elt)
759
760DO_3SAME_64(VRSHL_S64, gen_helper_neon_rshl_s64)
761DO_3SAME_64(VRSHL_U64, gen_helper_neon_rshl_u64)
762DO_3SAME_64_ENV(VQSHL_S64, gen_helper_neon_qshl_s64)
763DO_3SAME_64_ENV(VQSHL_U64, gen_helper_neon_qshl_u64)
764DO_3SAME_64_ENV(VQRSHL_S64, gen_helper_neon_qrshl_s64)
765DO_3SAME_64_ENV(VQRSHL_U64, gen_helper_neon_qrshl_u64)
766
767#define DO_3SAME_32(INSN, FUNC) \
768 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
769 uint32_t rn_ofs, uint32_t rm_ofs, \
770 uint32_t oprsz, uint32_t maxsz) \
771 { \
772 static const GVecGen3 ops[4] = { \
773 { .fni4 = gen_helper_neon_##FUNC##8 }, \
774 { .fni4 = gen_helper_neon_##FUNC##16 }, \
775 { .fni4 = gen_helper_neon_##FUNC##32 }, \
776 { 0 }, \
777 }; \
778 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece]); \
779 } \
780 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
781 { \
782 if (a->size > 2) { \
783 return false; \
784 } \
785 return do_3same(s, a, gen_##INSN##_3s); \
786 }
787
788/*
789 * Some helper functions need to be passed the cpu_env. In order
790 * to use those with the gvec APIs like tcg_gen_gvec_3() we need
791 * to create wrapper functions whose prototype is a NeonGenTwoOpFn()
792 * and which call a NeonGenTwoOpEnvFn().
793 */
794#define WRAP_ENV_FN(WRAPNAME, FUNC) \
795 static void WRAPNAME(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m) \
796 { \
797 FUNC(d, cpu_env, n, m); \
798 }
799
800#define DO_3SAME_32_ENV(INSN, FUNC) \
801 WRAP_ENV_FN(gen_##INSN##_tramp8, gen_helper_neon_##FUNC##8); \
802 WRAP_ENV_FN(gen_##INSN##_tramp16, gen_helper_neon_##FUNC##16); \
803 WRAP_ENV_FN(gen_##INSN##_tramp32, gen_helper_neon_##FUNC##32); \
804 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
805 uint32_t rn_ofs, uint32_t rm_ofs, \
806 uint32_t oprsz, uint32_t maxsz) \
807 { \
808 static const GVecGen3 ops[4] = { \
809 { .fni4 = gen_##INSN##_tramp8 }, \
810 { .fni4 = gen_##INSN##_tramp16 }, \
811 { .fni4 = gen_##INSN##_tramp32 }, \
812 { 0 }, \
813 }; \
814 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece]); \
815 } \
816 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
817 { \
818 if (a->size > 2) { \
819 return false; \
820 } \
821 return do_3same(s, a, gen_##INSN##_3s); \
822 }
823
824DO_3SAME_32(VHADD_S, hadd_s)
825DO_3SAME_32(VHADD_U, hadd_u)
826DO_3SAME_32(VHSUB_S, hsub_s)
827DO_3SAME_32(VHSUB_U, hsub_u)
828DO_3SAME_32(VRHADD_S, rhadd_s)
829DO_3SAME_32(VRHADD_U, rhadd_u)
830DO_3SAME_32(VRSHL_S, rshl_s)
831DO_3SAME_32(VRSHL_U, rshl_u)
832
833DO_3SAME_32_ENV(VQSHL_S, qshl_s)
834DO_3SAME_32_ENV(VQSHL_U, qshl_u)
835DO_3SAME_32_ENV(VQRSHL_S, qrshl_s)
836DO_3SAME_32_ENV(VQRSHL_U, qrshl_u)
837
838static bool do_3same_pair(DisasContext *s, arg_3same *a, NeonGenTwoOpFn *fn)
839{
840 /* Operations handled pairwise 32 bits at a time */
841 TCGv_i32 tmp, tmp2, tmp3;
842
843 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
844 return false;
845 }
846
847 /* UNDEF accesses to D16-D31 if they don't exist. */
848 if (!dc_isar_feature(aa32_simd_r32, s) &&
849 ((a->vd | a->vn | a->vm) & 0x10)) {
850 return false;
851 }
852
853 if (a->size == 3) {
854 return false;
855 }
856
857 if (!vfp_access_check(s)) {
858 return true;
859 }
860
861 assert(a->q == 0); /* enforced by decode patterns */
862
863 /*
864 * Note that we have to be careful not to clobber the source operands
865 * in the "vm == vd" case by storing the result of the first pass too
866 * early. Since Q is 0 there are always just two passes, so instead
867 * of a complicated loop over each pass we just unroll.
868 */
869 tmp = neon_load_reg(a->vn, 0);
870 tmp2 = neon_load_reg(a->vn, 1);
871 fn(tmp, tmp, tmp2);
872 tcg_temp_free_i32(tmp2);
873
874 tmp3 = neon_load_reg(a->vm, 0);
875 tmp2 = neon_load_reg(a->vm, 1);
876 fn(tmp3, tmp3, tmp2);
877 tcg_temp_free_i32(tmp2);
878
879 neon_store_reg(a->vd, 0, tmp);
880 neon_store_reg(a->vd, 1, tmp3);
881 return true;
882}
883
884#define DO_3SAME_PAIR(INSN, func) \
885 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
886 { \
887 static NeonGenTwoOpFn * const fns[] = { \
888 gen_helper_neon_##func##8, \
889 gen_helper_neon_##func##16, \
890 gen_helper_neon_##func##32, \
891 }; \
892 if (a->size > 2) { \
893 return false; \
894 } \
895 return do_3same_pair(s, a, fns[a->size]); \
896 }
897
898/* 32-bit pairwise ops end up the same as the elementwise versions. */
899#define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
900#define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
901#define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
902#define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
903#define gen_helper_neon_padd_u32 tcg_gen_add_i32
904
905DO_3SAME_PAIR(VPMAX_S, pmax_s)
906DO_3SAME_PAIR(VPMIN_S, pmin_s)
907DO_3SAME_PAIR(VPMAX_U, pmax_u)
908DO_3SAME_PAIR(VPMIN_U, pmin_u)
909DO_3SAME_PAIR(VPADD, padd_u)
910
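/*
 * VQDMULH/VQRDMULH only exist for 16-bit and 32-bit elements, which is
 * why the ops[] array below is indexed by (vece - 1) and the trans
 * function rejects any other size.
 */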
911#define DO_3SAME_VQDMULH(INSN, FUNC) \
912 WRAP_ENV_FN(gen_##INSN##_tramp16, gen_helper_neon_##FUNC##_s16); \
913 WRAP_ENV_FN(gen_##INSN##_tramp32, gen_helper_neon_##FUNC##_s32); \
914 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
915 uint32_t rn_ofs, uint32_t rm_ofs, \
916 uint32_t oprsz, uint32_t maxsz) \
917 { \
918 static const GVecGen3 ops[2] = { \
919 { .fni4 = gen_##INSN##_tramp16 }, \
920 { .fni4 = gen_##INSN##_tramp32 }, \
921 }; \
922 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece - 1]); \
923 } \
924 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
925 { \
926 if (a->size != 1 && a->size != 2) { \
927 return false; \
928 } \
929 return do_3same(s, a, gen_##INSN##_3s); \
930 }
931
932DO_3SAME_VQDMULH(VQDMULH, qdmulh)
933DO_3SAME_VQDMULH(VQRDMULH, qrdmulh)
934
935static bool do_3same_fp(DisasContext *s, arg_3same *a, VFPGen3OpSPFn *fn,
936 bool reads_vd)
937{
938 /*
939 * FP operations handled elementwise 32 bits at a time.
940 * If reads_vd is true then the old value of Vd will be
941 * loaded before calling the callback function. This is
942 * used for multiply-accumulate type operations.
943 */
944 TCGv_i32 tmp, tmp2;
945 int pass;
946
947 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
948 return false;
949 }
950
951 /* UNDEF accesses to D16-D31 if they don't exist. */
952 if (!dc_isar_feature(aa32_simd_r32, s) &&
953 ((a->vd | a->vn | a->vm) & 0x10)) {
954 return false;
955 }
956
957 if ((a->vn | a->vm | a->vd) & a->q) {
958 return false;
959 }
960
961 if (!vfp_access_check(s)) {
962 return true;
963 }
964
965 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
966 for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
967 tmp = neon_load_reg(a->vn, pass);
968 tmp2 = neon_load_reg(a->vm, pass);
969 if (reads_vd) {
970 TCGv_i32 tmp_rd = neon_load_reg(a->vd, pass);
971 fn(tmp_rd, tmp, tmp2, fpstatus);
972 neon_store_reg(a->vd, pass, tmp_rd);
973 tcg_temp_free_i32(tmp);
974 } else {
975 fn(tmp, tmp, tmp2, fpstatus);
976 neon_store_reg(a->vd, pass, tmp);
977 }
978 tcg_temp_free_i32(tmp2);
979 }
980 tcg_temp_free_ptr(fpstatus);
981 return true;
982}
983
984/*
985 * For all the functions using this macro, size == 1 means fp16,
986 * which is an architecture extension we don't implement yet.
987 */
988#define DO_3S_FP_GVEC(INSN,FUNC) \
989 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
990 uint32_t rn_ofs, uint32_t rm_ofs, \
991 uint32_t oprsz, uint32_t maxsz) \
992 { \
993 TCGv_ptr fpst = get_fpstatus_ptr(1); \
994 tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpst, \
995 oprsz, maxsz, 0, FUNC); \
996 tcg_temp_free_ptr(fpst); \
997 } \
998 static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a) \
999 { \
1000 if (a->size != 0) { \
1001 /* TODO fp16 support */ \
1002 return false; \
1003 } \
1004 return do_3same(s, a, gen_##INSN##_3s); \
1005 }
1006
1007
1008DO_3S_FP_GVEC(VADD, gen_helper_gvec_fadd_s)
1009DO_3S_FP_GVEC(VSUB, gen_helper_gvec_fsub_s)
1010DO_3S_FP_GVEC(VABD, gen_helper_gvec_fabd_s)
1011DO_3S_FP_GVEC(VMUL, gen_helper_gvec_fmul_s)
1012
1013/*
1014 * For all the functions using this macro, size == 1 means fp16,
1015 * which is an architecture extension we don't implement yet.
1016 */
1017#define DO_3S_FP(INSN,FUNC,READS_VD) \
1018 static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a) \
1019 { \
1020 if (a->size != 0) { \
1021 /* TODO fp16 support */ \
1022 return false; \
1023 } \
1024 return do_3same_fp(s, a, FUNC, READS_VD); \
1025 }
1026
1027DO_3S_FP(VCEQ, gen_helper_neon_ceq_f32, false)
1028DO_3S_FP(VCGE, gen_helper_neon_cge_f32, false)
1029DO_3S_FP(VCGT, gen_helper_neon_cgt_f32, false)
1030DO_3S_FP(VACGE, gen_helper_neon_acge_f32, false)
1031DO_3S_FP(VACGT, gen_helper_neon_acgt_f32, false)
1032DO_3S_FP(VMAX, gen_helper_vfp_maxs, false)
1033DO_3S_FP(VMIN, gen_helper_vfp_mins, false)
1034
1035static void gen_VMLA_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
1036 TCGv_ptr fpstatus)
1037{
1038 gen_helper_vfp_muls(vn, vn, vm, fpstatus);
1039 gen_helper_vfp_adds(vd, vd, vn, fpstatus);
1040}
1041
1042static void gen_VMLS_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
1043 TCGv_ptr fpstatus)
1044{
1045 gen_helper_vfp_muls(vn, vn, vm, fpstatus);
1046 gen_helper_vfp_subs(vd, vd, vn, fpstatus);
1047}
1048
1049DO_3S_FP(VMLA, gen_VMLA_fp_3s, true)
1050DO_3S_FP(VMLS, gen_VMLS_fp_3s, true)
1051
1052static bool trans_VMAXNM_fp_3s(DisasContext *s, arg_3same *a)
1053{
1054 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
1055 return false;
1056 }
1057
1058 if (a->size != 0) {
1059 /* TODO fp16 support */
1060 return false;
1061 }
1062
1063 return do_3same_fp(s, a, gen_helper_vfp_maxnums, false);
1064}
1065
1066static bool trans_VMINNM_fp_3s(DisasContext *s, arg_3same *a)
1067{
1068 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
1069 return false;
1070 }
1071
1072 if (a->size != 0) {
1073 /* TODO fp16 support */
1074 return false;
1075 }
1076
1077 return do_3same_fp(s, a, gen_helper_vfp_minnums, false);
1078}
1079
1080WRAP_ENV_FN(gen_VRECPS_tramp, gen_helper_recps_f32)
1081
1082static void gen_VRECPS_fp_3s(unsigned vece, uint32_t rd_ofs,
1083 uint32_t rn_ofs, uint32_t rm_ofs,
1084 uint32_t oprsz, uint32_t maxsz)
1085{
1086 static const GVecGen3 ops = { .fni4 = gen_VRECPS_tramp };
1087 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops);
1088}
1089
1090static bool trans_VRECPS_fp_3s(DisasContext *s, arg_3same *a)
1091{
1092 if (a->size != 0) {
1093 /* TODO fp16 support */
1094 return false;
1095 }
1096
1097 return do_3same(s, a, gen_VRECPS_fp_3s);
1098}
1099
1100WRAP_ENV_FN(gen_VRSQRTS_tramp, gen_helper_rsqrts_f32)
1101
1102static void gen_VRSQRTS_fp_3s(unsigned vece, uint32_t rd_ofs,
1103 uint32_t rn_ofs, uint32_t rm_ofs,
1104 uint32_t oprsz, uint32_t maxsz)
1105{
1106 static const GVecGen3 ops = { .fni4 = gen_VRSQRTS_tramp };
1107 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops);
1108}
1109
1110static bool trans_VRSQRTS_fp_3s(DisasContext *s, arg_3same *a)
1111{
1112 if (a->size != 0) {
1113 /* TODO fp16 support */
1114 return false;
1115 }
1116
1117 return do_3same(s, a, gen_VRSQRTS_fp_3s);
1118}
1119
1120static void gen_VFMA_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
1121 TCGv_ptr fpstatus)
1122{
1123 gen_helper_vfp_muladds(vd, vn, vm, vd, fpstatus);
1124}
1125
1126static bool trans_VFMA_fp_3s(DisasContext *s, arg_3same *a)
1127{
1128 if (!dc_isar_feature(aa32_simdfmac, s)) {
1129 return false;
1130 }
1131
1132 if (a->size != 0) {
1133 /* TODO fp16 support */
1134 return false;
1135 }
1136
1137 return do_3same_fp(s, a, gen_VFMA_fp_3s, true);
1138}
1139
1140static void gen_VFMS_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
1141 TCGv_ptr fpstatus)
1142{
1143 gen_helper_vfp_negs(vn, vn);
1144 gen_helper_vfp_muladds(vd, vn, vm, vd, fpstatus);
1145}
1146
1147static bool trans_VFMS_fp_3s(DisasContext *s, arg_3same *a)
1148{
1149 if (!dc_isar_feature(aa32_simdfmac, s)) {
1150 return false;
1151 }
1152
1153 if (a->size != 0) {
1154 /* TODO fp16 support */
1155 return false;
1156 }
1157
1158 return do_3same_fp(s, a, gen_VFMS_fp_3s, true);
1159}
1160
1161static bool do_3same_fp_pair(DisasContext *s, arg_3same *a, VFPGen3OpSPFn *fn)
1162{
1163 /* FP operations handled pairwise 32 bits at a time */
1164 TCGv_i32 tmp, tmp2, tmp3;
1165 TCGv_ptr fpstatus;
1166
1167 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1168 return false;
1169 }
1170
1171 /* UNDEF accesses to D16-D31 if they don't exist. */
1172 if (!dc_isar_feature(aa32_simd_r32, s) &&
1173 ((a->vd | a->vn | a->vm) & 0x10)) {
1174 return false;
1175 }
1176
1177 if (!vfp_access_check(s)) {
1178 return true;
1179 }
1180
1181 assert(a->q == 0); /* enforced by decode patterns */
1182
1183 /*
1184 * Note that we have to be careful not to clobber the source operands
1185 * in the "vm == vd" case by storing the result of the first pass too
1186 * early. Since Q is 0 there are always just two passes, so instead
1187 * of a complicated loop over each pass we just unroll.
1188 */
1189 fpstatus = get_fpstatus_ptr(1);
1190 tmp = neon_load_reg(a->vn, 0);
1191 tmp2 = neon_load_reg(a->vn, 1);
1192 fn(tmp, tmp, tmp2, fpstatus);
1193 tcg_temp_free_i32(tmp2);
1194
1195 tmp3 = neon_load_reg(a->vm, 0);
1196 tmp2 = neon_load_reg(a->vm, 1);
1197 fn(tmp3, tmp3, tmp2, fpstatus);
1198 tcg_temp_free_i32(tmp2);
1199 tcg_temp_free_ptr(fpstatus);
1200
1201 neon_store_reg(a->vd, 0, tmp);
1202 neon_store_reg(a->vd, 1, tmp3);
1203 return true;
1204}
1205
1206/*
1207 * For all the functions using this macro, size == 1 means fp16,
1208 * which is an architecture extension we don't implement yet.
1209 */
1210#define DO_3S_FP_PAIR(INSN,FUNC) \
1211 static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a) \
1212 { \
1213 if (a->size != 0) { \
1214 /* TODO fp16 support */ \
1215 return false; \
1216 } \
1217 return do_3same_fp_pair(s, a, FUNC); \
1218 }
1219
1220DO_3S_FP_PAIR(VPADD, gen_helper_vfp_adds)
1221DO_3S_FP_PAIR(VPMAX, gen_helper_vfp_maxs)
1222DO_3S_FP_PAIR(VPMIN, gen_helper_vfp_mins)
1223
1224static bool do_vector_2sh(DisasContext *s, arg_2reg_shift *a, GVecGen2iFn *fn)
1225{
1226 /* Handle a 2-reg-shift insn which can be vectorized. */
1227 int vec_size = a->q ? 16 : 8;
1228 int rd_ofs = neon_reg_offset(a->vd, 0);
1229 int rm_ofs = neon_reg_offset(a->vm, 0);
1230
1231 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1232 return false;
1233 }
1234
1235 /* UNDEF accesses to D16-D31 if they don't exist. */
1236 if (!dc_isar_feature(aa32_simd_r32, s) &&
1237 ((a->vd | a->vm) & 0x10)) {
1238 return false;
1239 }
1240
1241 if ((a->vm | a->vd) & a->q) {
1242 return false;
1243 }
1244
1245 if (!vfp_access_check(s)) {
1246 return true;
1247 }
1248
1249 fn(a->size, rd_ofs, rm_ofs, a->shift, vec_size, vec_size);
1250 return true;
1251}
1252
1253#define DO_2SH(INSN, FUNC) \
1254 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1255 { \
1256 return do_vector_2sh(s, a, FUNC); \
1257 } \
1258
1259DO_2SH(VSHL, tcg_gen_gvec_shli)
1260DO_2SH(VSLI, gen_gvec_sli)
1261DO_2SH(VSRI, gen_gvec_sri)
1262DO_2SH(VSRA_S, gen_gvec_ssra)
1263DO_2SH(VSRA_U, gen_gvec_usra)
1264DO_2SH(VRSHR_S, gen_gvec_srshr)
1265DO_2SH(VRSHR_U, gen_gvec_urshr)
1266DO_2SH(VRSRA_S, gen_gvec_srsra)
1267DO_2SH(VRSRA_U, gen_gvec_ursra)
1268
1269static bool trans_VSHR_S_2sh(DisasContext *s, arg_2reg_shift *a)
1270{
1271 /* Signed shift out of range results in all-sign-bits */
1272 a->shift = MIN(a->shift, (8 << a->size) - 1);
1273 return do_vector_2sh(s, a, tcg_gen_gvec_sari);
1274}
1275
1276static void gen_zero_rd_2sh(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
1277 int64_t shift, uint32_t oprsz, uint32_t maxsz)
1278{
1279 tcg_gen_gvec_dup_imm(vece, rd_ofs, oprsz, maxsz, 0);
1280}
1281
1282static bool trans_VSHR_U_2sh(DisasContext *s, arg_2reg_shift *a)
1283{
1284 /* Shift out of range is architecturally valid and results in zero. */
1285 if (a->shift >= (8 << a->size)) {
1286 return do_vector_2sh(s, a, gen_zero_rd_2sh);
1287 } else {
1288 return do_vector_2sh(s, a, tcg_gen_gvec_shri);
1289 }
1290}
1291
1292static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a,
1293 NeonGenTwo64OpEnvFn *fn)
1294{
1295 /*
1296 * 2-reg-and-shift operations, size == 3 case, where the
1297 * function needs to be passed cpu_env.
1298 */
1299 TCGv_i64 constimm;
1300 int pass;
1301
1302 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1303 return false;
1304 }
1305
1306 /* UNDEF accesses to D16-D31 if they don't exist. */
1307 if (!dc_isar_feature(aa32_simd_r32, s) &&
1308 ((a->vd | a->vm) & 0x10)) {
1309 return false;
1310 }
1311
1312 if ((a->vm | a->vd) & a->q) {
1313 return false;
1314 }
1315
1316 if (!vfp_access_check(s)) {
1317 return true;
1318 }
1319
1320 /*
1321 * To avoid excessive duplication of ops we implement shift
1322 * by immediate using the variable shift operations.
1323 */
1324 constimm = tcg_const_i64(dup_const(a->size, a->shift));
1325
1326 for (pass = 0; pass < a->q + 1; pass++) {
1327 TCGv_i64 tmp = tcg_temp_new_i64();
1328
1329 neon_load_reg64(tmp, a->vm + pass);
1330 fn(tmp, cpu_env, tmp, constimm);
1331 neon_store_reg64(tmp, a->vd + pass);
1332 }
1333 tcg_temp_free_i64(constimm);
1334 return true;
1335}
1336
1337static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
1338 NeonGenTwoOpEnvFn *fn)
1339{
1340 /*
1341 * 2-reg-and-shift operations, size < 3 case, where the
1342 * helper needs to be passed cpu_env.
1343 */
1344 TCGv_i32 constimm;
1345 int pass;
1346
1347 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1348 return false;
1349 }
1350
1351 /* UNDEF accesses to D16-D31 if they don't exist. */
1352 if (!dc_isar_feature(aa32_simd_r32, s) &&
1353 ((a->vd | a->vm) & 0x10)) {
1354 return false;
1355 }
1356
1357 if ((a->vm | a->vd) & a->q) {
1358 return false;
1359 }
1360
1361 if (!vfp_access_check(s)) {
1362 return true;
1363 }
1364
1365 /*
1366 * To avoid excessive duplication of ops we implement shift
1367 * by immediate using the variable shift operations.
1368 */
1369 constimm = tcg_const_i32(dup_const(a->size, a->shift));
1370
1371 for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
1372 TCGv_i32 tmp = neon_load_reg(a->vm, pass);
1373 fn(tmp, cpu_env, tmp, constimm);
1374 neon_store_reg(a->vd, pass, tmp);
1375 }
1376 tcg_temp_free_i32(constimm);
1377 return true;
1378}
1379
1380#define DO_2SHIFT_ENV(INSN, FUNC) \
1381 static bool trans_##INSN##_64_2sh(DisasContext *s, arg_2reg_shift *a) \
1382 { \
1383 return do_2shift_env_64(s, a, gen_helper_neon_##FUNC##64); \
1384 } \
1385 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1386 { \
1387 static NeonGenTwoOpEnvFn * const fns[] = { \
1388 gen_helper_neon_##FUNC##8, \
1389 gen_helper_neon_##FUNC##16, \
1390 gen_helper_neon_##FUNC##32, \
1391 }; \
1392 assert(a->size < ARRAY_SIZE(fns)); \
1393 return do_2shift_env_32(s, a, fns[a->size]); \
1394 }
1395
1396DO_2SHIFT_ENV(VQSHLU, qshlu_s)
1397DO_2SHIFT_ENV(VQSHL_U, qshl_u)
1398DO_2SHIFT_ENV(VQSHL_S, qshl_s)
1399
1400static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
1401 NeonGenTwo64OpFn *shiftfn,
1402 NeonGenNarrowEnvFn *narrowfn)
1403{
1404 /* 2-reg-and-shift narrowing-shift operations, size == 3 case */
1405 TCGv_i64 constimm, rm1, rm2;
1406 TCGv_i32 rd;
1407
1408 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1409 return false;
1410 }
1411
1412 /* UNDEF accesses to D16-D31 if they don't exist. */
1413 if (!dc_isar_feature(aa32_simd_r32, s) &&
1414 ((a->vd | a->vm) & 0x10)) {
1415 return false;
1416 }
1417
1418 if (a->vm & 1) {
1419 return false;
1420 }
1421
1422 if (!vfp_access_check(s)) {
1423 return true;
1424 }
1425
1426 /*
1427 * This is always a right shift, and the shiftfn is always a
1428 * left-shift helper, which thus needs the negated shift count.
1429 */
1430 constimm = tcg_const_i64(-a->shift);
1431 rm1 = tcg_temp_new_i64();
1432 rm2 = tcg_temp_new_i64();
1433
1434 /* Load both inputs first to avoid potential overwrite if rm == rd */
1435 neon_load_reg64(rm1, a->vm);
1436 neon_load_reg64(rm2, a->vm + 1);
1437
1438 shiftfn(rm1, rm1, constimm);
1439 rd = tcg_temp_new_i32();
1440 narrowfn(rd, cpu_env, rm1);
1441 neon_store_reg(a->vd, 0, rd);
1442
1443 shiftfn(rm2, rm2, constimm);
1444 rd = tcg_temp_new_i32();
1445 narrowfn(rd, cpu_env, rm2);
1446 neon_store_reg(a->vd, 1, rd);
1447
1448 tcg_temp_free_i64(rm1);
1449 tcg_temp_free_i64(rm2);
1450 tcg_temp_free_i64(constimm);
1451
1452 return true;
1453}
1454
1455static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
1456 NeonGenTwoOpFn *shiftfn,
1457 NeonGenNarrowEnvFn *narrowfn)
1458{
1459 /* 2-reg-and-shift narrowing-shift operations, size < 3 case */
1460 TCGv_i32 constimm, rm1, rm2, rm3, rm4;
1461 TCGv_i64 rtmp;
1462 uint32_t imm;
1463
1464 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1465 return false;
1466 }
1467
1468 /* UNDEF accesses to D16-D31 if they don't exist. */
1469 if (!dc_isar_feature(aa32_simd_r32, s) &&
1470 ((a->vd | a->vm) & 0x10)) {
1471 return false;
1472 }
1473
1474 if (a->vm & 1) {
1475 return false;
1476 }
1477
1478 if (!vfp_access_check(s)) {
1479 return true;
1480 }
1481
1482 /*
1483 * This is always a right shift, and the shiftfn is always a
1484 * left-shift helper, which thus needs the negated shift count
1485 * duplicated into each lane of the immediate value.
1486 */
1487 if (a->size == 1) {
1488 imm = (uint16_t)(-a->shift);
1489 imm |= imm << 16;
1490 } else {
1491 /* size == 2 */
1492 imm = -a->shift;
1493 }
1494 constimm = tcg_const_i32(imm);
1495
1496 /* Load all inputs first to avoid potential overwrite */
1497 rm1 = neon_load_reg(a->vm, 0);
1498 rm2 = neon_load_reg(a->vm, 1);
1499 rm3 = neon_load_reg(a->vm + 1, 0);
1500 rm4 = neon_load_reg(a->vm + 1, 1);
1501 rtmp = tcg_temp_new_i64();
1502
1503 shiftfn(rm1, rm1, constimm);
1504 shiftfn(rm2, rm2, constimm);
1505
1506 tcg_gen_concat_i32_i64(rtmp, rm1, rm2);
1507 tcg_temp_free_i32(rm2);
1508
1509 narrowfn(rm1, cpu_env, rtmp);
1510 neon_store_reg(a->vd, 0, rm1);
1511
1512 shiftfn(rm3, rm3, constimm);
1513 shiftfn(rm4, rm4, constimm);
1514 tcg_temp_free_i32(constimm);
1515
1516 tcg_gen_concat_i32_i64(rtmp, rm3, rm4);
1517 tcg_temp_free_i32(rm4);
1518
1519 narrowfn(rm3, cpu_env, rtmp);
1520 tcg_temp_free_i64(rtmp);
1521 neon_store_reg(a->vd, 1, rm3);
1522 return true;
1523}
1524
1525#define DO_2SN_64(INSN, FUNC, NARROWFUNC) \
1526 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1527 { \
1528 return do_2shift_narrow_64(s, a, FUNC, NARROWFUNC); \
1529 }
1530#define DO_2SN_32(INSN, FUNC, NARROWFUNC) \
1531 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1532 { \
1533 return do_2shift_narrow_32(s, a, FUNC, NARROWFUNC); \
1534 }
1535
1536static void gen_neon_narrow_u32(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
1537{
1538 tcg_gen_extrl_i64_i32(dest, src);
1539}
1540
1541static void gen_neon_narrow_u16(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
1542{
1543 gen_helper_neon_narrow_u16(dest, src);
1544}
1545
1546static void gen_neon_narrow_u8(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
1547{
1548 gen_helper_neon_narrow_u8(dest, src);
1549}
1550
1551DO_2SN_64(VSHRN_64, gen_ushl_i64, gen_neon_narrow_u32)
1552DO_2SN_32(VSHRN_32, gen_ushl_i32, gen_neon_narrow_u16)
1553DO_2SN_32(VSHRN_16, gen_helper_neon_shl_u16, gen_neon_narrow_u8)
1554
1555DO_2SN_64(VRSHRN_64, gen_helper_neon_rshl_u64, gen_neon_narrow_u32)
1556DO_2SN_32(VRSHRN_32, gen_helper_neon_rshl_u32, gen_neon_narrow_u16)
1557DO_2SN_32(VRSHRN_16, gen_helper_neon_rshl_u16, gen_neon_narrow_u8)
1558
1559DO_2SN_64(VQSHRUN_64, gen_sshl_i64, gen_helper_neon_unarrow_sat32)
1560DO_2SN_32(VQSHRUN_32, gen_sshl_i32, gen_helper_neon_unarrow_sat16)
1561DO_2SN_32(VQSHRUN_16, gen_helper_neon_shl_s16, gen_helper_neon_unarrow_sat8)
1562
1563DO_2SN_64(VQRSHRUN_64, gen_helper_neon_rshl_s64, gen_helper_neon_unarrow_sat32)
1564DO_2SN_32(VQRSHRUN_32, gen_helper_neon_rshl_s32, gen_helper_neon_unarrow_sat16)
1565DO_2SN_32(VQRSHRUN_16, gen_helper_neon_rshl_s16, gen_helper_neon_unarrow_sat8)
1566DO_2SN_64(VQSHRN_S64, gen_sshl_i64, gen_helper_neon_narrow_sat_s32)
1567DO_2SN_32(VQSHRN_S32, gen_sshl_i32, gen_helper_neon_narrow_sat_s16)
1568DO_2SN_32(VQSHRN_S16, gen_helper_neon_shl_s16, gen_helper_neon_narrow_sat_s8)
1569
1570DO_2SN_64(VQRSHRN_S64, gen_helper_neon_rshl_s64, gen_helper_neon_narrow_sat_s32)
1571DO_2SN_32(VQRSHRN_S32, gen_helper_neon_rshl_s32, gen_helper_neon_narrow_sat_s16)
1572DO_2SN_32(VQRSHRN_S16, gen_helper_neon_rshl_s16, gen_helper_neon_narrow_sat_s8)
1573
1574DO_2SN_64(VQSHRN_U64, gen_ushl_i64, gen_helper_neon_narrow_sat_u32)
1575DO_2SN_32(VQSHRN_U32, gen_ushl_i32, gen_helper_neon_narrow_sat_u16)
1576DO_2SN_32(VQSHRN_U16, gen_helper_neon_shl_u16, gen_helper_neon_narrow_sat_u8)
1577
1578DO_2SN_64(VQRSHRN_U64, gen_helper_neon_rshl_u64, gen_helper_neon_narrow_sat_u32)
1579DO_2SN_32(VQRSHRN_U32, gen_helper_neon_rshl_u32, gen_helper_neon_narrow_sat_u16)
1580DO_2SN_32(VQRSHRN_U16, gen_helper_neon_rshl_u16, gen_helper_neon_narrow_sat_u8)
1581
1582static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a,
1583 NeonGenWidenFn *widenfn, bool u)
1584{
1585 TCGv_i64 tmp;
1586 TCGv_i32 rm0, rm1;
1587 uint64_t widen_mask = 0;
1588
1589 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1590 return false;
1591 }
1592
1593 /* UNDEF accesses to D16-D31 if they don't exist. */
1594 if (!dc_isar_feature(aa32_simd_r32, s) &&
1595 ((a->vd | a->vm) & 0x10)) {
1596 return false;
1597 }
1598
1599 if (a->vd & 1) {
1600 return false;
1601 }
1602
1603 if (!vfp_access_check(s)) {
1604 return true;
1605 }
1606
1607 /*
1608 * This is a widen-and-shift operation. The shift is always less
1609 * than the width of the source type, so after widening the input
1610 * vector we can simply shift the whole 64-bit widened register,
1611 * and then clear the potential overflow bits resulting from left
1612 * bits of the narrow input appearing as right bits of the left
1613 * neighbour narrow input. Calculate a mask of bits to clear.
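 * For example, with size == 0 (byte elements) and shift == 3 the mask
 * works out to 0x0007000700070007: the low 3 bits of each widened
 * 16-bit lane, which are cleared after the shift.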
1614 */
1615 if ((a->shift != 0) && (a->size < 2 || u)) {
1616 int esize = 8 << a->size;
1617 widen_mask = MAKE_64BIT_MASK(0, esize);
1618 widen_mask >>= esize - a->shift;
1619 widen_mask = dup_const(a->size + 1, widen_mask);
1620 }
1621
1622 rm0 = neon_load_reg(a->vm, 0);
1623 rm1 = neon_load_reg(a->vm, 1);
1624 tmp = tcg_temp_new_i64();
1625
1626 widenfn(tmp, rm0);
1627 tcg_temp_free_i32(rm0);
1628 if (a->shift != 0) {
1629 tcg_gen_shli_i64(tmp, tmp, a->shift);
1630 tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
1631 }
1632 neon_store_reg64(tmp, a->vd);
1633
1634 widenfn(tmp, rm1);
1635 tcg_temp_free_i32(rm1);
1636 if (a->shift != 0) {
1637 tcg_gen_shli_i64(tmp, tmp, a->shift);
1638 tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
1639 }
1640 neon_store_reg64(tmp, a->vd + 1);
1641 tcg_temp_free_i64(tmp);
1642 return true;
1643}
1644
1645static bool trans_VSHLL_S_2sh(DisasContext *s, arg_2reg_shift *a)
1646{
1647 static NeonGenWidenFn * const widenfn[] = {
1648 gen_helper_neon_widen_s8,
1649 gen_helper_neon_widen_s16,
1650 tcg_gen_ext_i32_i64,
1651 };
1652 return do_vshll_2sh(s, a, widenfn[a->size], false);
1653}
1654
1655static bool trans_VSHLL_U_2sh(DisasContext *s, arg_2reg_shift *a)
1656{
1657 static NeonGenWidenFn * const widenfn[] = {
1658 gen_helper_neon_widen_u8,
1659 gen_helper_neon_widen_u16,
1660 tcg_gen_extu_i32_i64,
1661 };
1662 return do_vshll_2sh(s, a, widenfn[a->size], true);
1663}
1664
1665static bool do_fp_2sh(DisasContext *s, arg_2reg_shift *a,
1666 NeonGenTwoSingleOPFn *fn)
1667{
1668 /* FP operations in 2-reg-and-shift group */
1669 TCGv_i32 tmp, shiftv;
1670 TCGv_ptr fpstatus;
1671 int pass;
1672
1673 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1674 return false;
1675 }
1676
1677 /* UNDEF accesses to D16-D31 if they don't exist. */
1678 if (!dc_isar_feature(aa32_simd_r32, s) &&
1679 ((a->vd | a->vm) & 0x10)) {
1680 return false;
1681 }
1682
1683 if ((a->vm | a->vd) & a->q) {
1684 return false;
1685 }
1686
1687 if (!vfp_access_check(s)) {
1688 return true;
1689 }
1690
1691 fpstatus = get_fpstatus_ptr(1);
1692 shiftv = tcg_const_i32(a->shift);
1693 for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
1694 tmp = neon_load_reg(a->vm, pass);
1695 fn(tmp, tmp, shiftv, fpstatus);
1696 neon_store_reg(a->vd, pass, tmp);
1697 }
1698 tcg_temp_free_ptr(fpstatus);
1699 tcg_temp_free_i32(shiftv);
1700 return true;
1701}
1702
1703#define DO_FP_2SH(INSN, FUNC) \
1704 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1705 { \
1706 return do_fp_2sh(s, a, FUNC); \
1707 }
1708
1709DO_FP_2SH(VCVT_SF, gen_helper_vfp_sltos)
1710DO_FP_2SH(VCVT_UF, gen_helper_vfp_ultos)
1711DO_FP_2SH(VCVT_FS, gen_helper_vfp_tosls_round_to_zero)
1712DO_FP_2SH(VCVT_FU, gen_helper_vfp_touls_round_to_zero)
1713
1714static uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
1715{
1716 /*
1717 * Expand the encoded constant.
1718 * Note that cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
1719 * We choose to not special-case this and will behave as if a
1720 * valid constant encoding of 0 had been given.
1721 * cmode = 15 op = 1 must UNDEF; we assume decode has handled that.
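 * For example cmode = 12, op = 0, imm = 0xab expands to 0x0000abff in
 * each 32-bit lane, i.e. a dup_const result of 0x0000abff0000abff.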
1722 */
1723 switch (cmode) {
1724 case 0: case 1:
1725 /* no-op */
1726 break;
1727 case 2: case 3:
1728 imm <<= 8;
1729 break;
1730 case 4: case 5:
1731 imm <<= 16;
1732 break;
1733 case 6: case 7:
1734 imm <<= 24;
1735 break;
1736 case 8: case 9:
1737 imm |= imm << 16;
1738 break;
1739 case 10: case 11:
1740 imm = (imm << 8) | (imm << 24);
1741 break;
1742 case 12:
1743 imm = (imm << 8) | 0xff;
1744 break;
1745 case 13:
1746 imm = (imm << 16) | 0xffff;
1747 break;
1748 case 14:
1749 if (op) {
1750 /*
1751 * This is the only case where the top and bottom 32 bits
1752 * of the encoded constant differ.
1753 */
1754 uint64_t imm64 = 0;
1755 int n;
1756
1757 for (n = 0; n < 8; n++) {
1758 if (imm & (1 << n)) {
1759 imm64 |= (0xffULL << (n * 8));
1760 }
1761 }
1762 return imm64;
1763 }
1764 imm |= (imm << 8) | (imm << 16) | (imm << 24);
1765 break;
1766 case 15:
1767 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
1768 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
1769 break;
1770 }
1771 if (op) {
1772 imm = ~imm;
1773 }
1774 return dup_const(MO_32, imm);
1775}
1776
1777static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a,
1778 GVecGen2iFn *fn)
1779{
1780 uint64_t imm;
1781 int reg_ofs, vec_size;
1782
1783 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1784 return false;
1785 }
1786
1787 /* UNDEF accesses to D16-D31 if they don't exist. */
1788 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
1789 return false;
1790 }
1791
1792 if (a->vd & a->q) {
1793 return false;
1794 }
1795
1796 if (!vfp_access_check(s)) {
1797 return true;
1798 }
1799
1800 reg_ofs = neon_reg_offset(a->vd, 0);
1801 vec_size = a->q ? 16 : 8;
1802 imm = asimd_imm_const(a->imm, a->cmode, a->op);
1803
1804 fn(MO_64, reg_ofs, reg_ofs, imm, vec_size, vec_size);
1805 return true;
1806}
1807
1808static void gen_VMOV_1r(unsigned vece, uint32_t dofs, uint32_t aofs,
1809 int64_t c, uint32_t oprsz, uint32_t maxsz)
1810{
1811 tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, c);
1812}
1813
1814static bool trans_Vimm_1r(DisasContext *s, arg_1reg_imm *a)
1815{
1816 /* Handle decode of cmode/op here between VORR/VBIC/VMOV */
1817 GVecGen2iFn *fn;
1818
1819 if ((a->cmode & 1) && a->cmode < 12) {
1820 /* for op=1, the imm will be inverted, so BIC becomes AND. */
1821 fn = a->op ? tcg_gen_gvec_andi : tcg_gen_gvec_ori;
1822 } else {
1823 /* There is one unallocated cmode/op combination in this space */
1824 if (a->cmode == 15 && a->op == 1) {
1825 return false;
1826 }
1827 fn = gen_VMOV_1r;
1828 }
1829 return do_1reg_imm(s, a, fn);
1830}
1831
1832static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
1833 NeonGenWidenFn *widenfn,
1834 NeonGenTwo64OpFn *opfn,
1835 bool src1_wide)
1836{
1837 /* 3-regs different lengths, prewidening case (VADDL/VSUBL/VADDW/VSUBW) */
1838 TCGv_i64 rn0_64, rn1_64, rm_64;
1839 TCGv_i32 rm;
1840
1841 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1842 return false;
1843 }
1844
1845 /* UNDEF accesses to D16-D31 if they don't exist. */
1846 if (!dc_isar_feature(aa32_simd_r32, s) &&
1847 ((a->vd | a->vn | a->vm) & 0x10)) {
1848 return false;
1849 }
1850
1851 if (!widenfn || !opfn) {
1852 /* size == 3 case, which is an entirely different insn group */
1853 return false;
1854 }
1855
1856 if ((a->vd & 1) || (src1_wide && (a->vn & 1))) {
1857 return false;
1858 }
1859
1860 if (!vfp_access_check(s)) {
1861 return true;
1862 }
1863
1864 rn0_64 = tcg_temp_new_i64();
1865 rn1_64 = tcg_temp_new_i64();
1866 rm_64 = tcg_temp_new_i64();
1867
1868 if (src1_wide) {
1869 neon_load_reg64(rn0_64, a->vn);
1870 } else {
1871 TCGv_i32 tmp = neon_load_reg(a->vn, 0);
1872 widenfn(rn0_64, tmp);
1873 tcg_temp_free_i32(tmp);
1874 }
1875 rm = neon_load_reg(a->vm, 0);
1876
1877 widenfn(rm_64, rm);
1878 tcg_temp_free_i32(rm);
1879 opfn(rn0_64, rn0_64, rm_64);
1880
1881 /*
1882 * Load second pass inputs before storing the first pass result, to
1883 * avoid incorrect results if a narrow input overlaps with the result.
1884 */
1885 if (src1_wide) {
1886 neon_load_reg64(rn1_64, a->vn + 1);
1887 } else {
1888 TCGv_i32 tmp = neon_load_reg(a->vn, 1);
1889 widenfn(rn1_64, tmp);
1890 tcg_temp_free_i32(tmp);
1891 }
1892 rm = neon_load_reg(a->vm, 1);
1893
1894 neon_store_reg64(rn0_64, a->vd);
1895
1896 widenfn(rm_64, rm);
1897 tcg_temp_free_i32(rm);
1898 opfn(rn1_64, rn1_64, rm_64);
1899 neon_store_reg64(rn1_64, a->vd + 1);
1900
1901 tcg_temp_free_i64(rn0_64);
1902 tcg_temp_free_i64(rn1_64);
1903 tcg_temp_free_i64(rm_64);
1904
1905 return true;
1906}
1907
1908#define DO_PREWIDEN(INSN, S, EXT, OP, SRC1WIDE) \
1909 static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
1910 { \
1911 static NeonGenWidenFn * const widenfn[] = { \
1912 gen_helper_neon_widen_##S##8, \
1913 gen_helper_neon_widen_##S##16, \
1914 tcg_gen_##EXT##_i32_i64, \
1915 NULL, \
1916 }; \
1917 static NeonGenTwo64OpFn * const addfn[] = { \
1918 gen_helper_neon_##OP##l_u16, \
1919 gen_helper_neon_##OP##l_u32, \
1920 tcg_gen_##OP##_i64, \
1921 NULL, \
1922 }; \
1923 return do_prewiden_3d(s, a, widenfn[a->size], \
1924 addfn[a->size], SRC1WIDE); \
1925 }
1926
1927DO_PREWIDEN(VADDL_S, s, ext, add, false)
1928DO_PREWIDEN(VADDL_U, u, extu, add, false)
1929DO_PREWIDEN(VSUBL_S, s, ext, sub, false)
1930DO_PREWIDEN(VSUBL_U, u, extu, sub, false)
1931DO_PREWIDEN(VADDW_S, s, ext, add, true)
1932DO_PREWIDEN(VADDW_U, u, extu, add, true)
1933DO_PREWIDEN(VSUBW_S, s, ext, sub, true)
1934DO_PREWIDEN(VSUBW_U, u, extu, sub, true)
1935
1936static bool do_narrow_3d(DisasContext *s, arg_3diff *a,
1937 NeonGenTwo64OpFn *opfn, NeonGenNarrowFn *narrowfn)
1938{
1939 /* 3-regs different lengths, narrowing (VADDHN/VSUBHN/VRADDHN/VRSUBHN) */
1940 TCGv_i64 rn_64, rm_64;
1941 TCGv_i32 rd0, rd1;
1942
1943 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1944 return false;
1945 }
1946
1947 /* UNDEF accesses to D16-D31 if they don't exist. */
1948 if (!dc_isar_feature(aa32_simd_r32, s) &&
1949 ((a->vd | a->vn | a->vm) & 0x10)) {
1950 return false;
1951 }
1952
1953 if (!opfn || !narrowfn) {
1954 /* size == 3 case, which is an entirely different insn group */
1955 return false;
1956 }
1957
1958 if ((a->vn | a->vm) & 1) {
1959 return false;
1960 }
1961
1962 if (!vfp_access_check(s)) {
1963 return true;
1964 }
1965
1966 rn_64 = tcg_temp_new_i64();
1967 rm_64 = tcg_temp_new_i64();
1968 rd0 = tcg_temp_new_i32();
1969 rd1 = tcg_temp_new_i32();
1970
1971 neon_load_reg64(rn_64, a->vn);
1972 neon_load_reg64(rm_64, a->vm);
1973
1974 opfn(rn_64, rn_64, rm_64);
1975
1976 narrowfn(rd0, rn_64);
1977
1978 neon_load_reg64(rn_64, a->vn + 1);
1979 neon_load_reg64(rm_64, a->vm + 1);
1980
1981 opfn(rn_64, rn_64, rm_64);
1982
1983 narrowfn(rd1, rn_64);
1984
1985 neon_store_reg(a->vd, 0, rd0);
1986 neon_store_reg(a->vd, 1, rd1);
1987
1988 tcg_temp_free_i64(rn_64);
1989 tcg_temp_free_i64(rm_64);
1990
1991 return true;
1992}
1993
1994#define DO_NARROW_3D(INSN, OP, NARROWTYPE, EXTOP) \
1995 static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
1996 { \
1997 static NeonGenTwo64OpFn * const addfn[] = { \
1998 gen_helper_neon_##OP##l_u16, \
1999 gen_helper_neon_##OP##l_u32, \
2000 tcg_gen_##OP##_i64, \
2001 NULL, \
2002 }; \
2003 static NeonGenNarrowFn * const narrowfn[] = { \
2004 gen_helper_neon_##NARROWTYPE##_high_u8, \
2005 gen_helper_neon_##NARROWTYPE##_high_u16, \
2006 EXTOP, \
2007 NULL, \
2008 }; \
2009 return do_narrow_3d(s, a, addfn[a->size], narrowfn[a->size]); \
2010 }
2011
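/*
 * Rounding variant of "take the high half": adding 1 << 31 before
 * extracting bits [63:32] rounds to nearest instead of truncating.
 * This is the 32-bit EXTOP entry for the VRADDHN/VRSUBHN expansions
 * below.
 */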
2012static void gen_narrow_round_high_u32(TCGv_i32 rd, TCGv_i64 rn)
2013{
2014 tcg_gen_addi_i64(rn, rn, 1u << 31);
2015 tcg_gen_extrh_i64_i32(rd, rn);
2016}
2017
2018DO_NARROW_3D(VADDHN, add, narrow, tcg_gen_extrh_i64_i32)
2019DO_NARROW_3D(VSUBHN, sub, narrow, tcg_gen_extrh_i64_i32)
2020DO_NARROW_3D(VRADDHN, add, narrow_round, gen_narrow_round_high_u32)
2021DO_NARROW_3D(VRSUBHN, sub, narrow_round, gen_narrow_round_high_u32)
f5b28401
PM
2022
2023static bool do_long_3d(DisasContext *s, arg_3diff *a,
2024 NeonGenTwoOpWidenFn *opfn,
2025 NeonGenTwo64OpFn *accfn)
2026{
2027 /*
2028 * 3-regs different lengths, long operations.
2029 * These perform an operation on two inputs that returns a double-width
2030 * result, and then possibly perform an accumulation operation of
2031 * that result into the double-width destination.
2032 */
2033 TCGv_i64 rd0, rd1, tmp;
2034 TCGv_i32 rn, rm;
2035
2036 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
2037 return false;
2038 }
2039
2040 /* UNDEF accesses to D16-D31 if they don't exist. */
2041 if (!dc_isar_feature(aa32_simd_r32, s) &&
2042 ((a->vd | a->vn | a->vm) & 0x10)) {
2043 return false;
2044 }
2045
2046 if (!opfn) {
2047 /* size == 3 case, which is an entirely different insn group */
2048 return false;
2049 }
2050
2051 if (a->vd & 1) {
2052 return false;
2053 }
2054
2055 if (!vfp_access_check(s)) {
2056 return true;
2057 }
2058
2059 rd0 = tcg_temp_new_i64();
2060 rd1 = tcg_temp_new_i64();
2061
2062 rn = neon_load_reg(a->vn, 0);
2063 rm = neon_load_reg(a->vm, 0);
2064 opfn(rd0, rn, rm);
2065 tcg_temp_free_i32(rn);
2066 tcg_temp_free_i32(rm);
2067
2068 rn = neon_load_reg(a->vn, 1);
2069 rm = neon_load_reg(a->vm, 1);
2070 opfn(rd1, rn, rm);
2071 tcg_temp_free_i32(rn);
2072 tcg_temp_free_i32(rm);
2073
2074 /* Don't store results until after all loads: they might overlap */
2075 if (accfn) {
2076 tmp = tcg_temp_new_i64();
2077 neon_load_reg64(tmp, a->vd);
2078 accfn(tmp, tmp, rd0);
2079 neon_store_reg64(tmp, a->vd);
2080 neon_load_reg64(tmp, a->vd + 1);
2081 accfn(tmp, tmp, rd1);
2082 neon_store_reg64(tmp, a->vd + 1);
2083 tcg_temp_free_i64(tmp);
2084 } else {
2085 neon_store_reg64(rd0, a->vd);
2086 neon_store_reg64(rd1, a->vd + 1);
2087 }
2088
2089 tcg_temp_free_i64(rd0);
2090 tcg_temp_free_i64(rd1);
2091
2092 return true;
2093}
2094
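/*
 * VABDL/VABAL: absolute difference long, optionally accumulating.
 * The abdl helpers already return the widened absolute difference,
 * so VABAL only has to add a plain double-width accumulate step.
 */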
2095static bool trans_VABDL_S_3d(DisasContext *s, arg_3diff *a)
2096{
2097 static NeonGenTwoOpWidenFn * const opfn[] = {
2098 gen_helper_neon_abdl_s16,
2099 gen_helper_neon_abdl_s32,
2100 gen_helper_neon_abdl_s64,
2101 NULL,
2102 };
2103
2104 return do_long_3d(s, a, opfn[a->size], NULL);
2105}
2106
2107static bool trans_VABDL_U_3d(DisasContext *s, arg_3diff *a)
2108{
2109 static NeonGenTwoOpWidenFn * const opfn[] = {
2110 gen_helper_neon_abdl_u16,
2111 gen_helper_neon_abdl_u32,
2112 gen_helper_neon_abdl_u64,
2113 NULL,
2114 };
2115
2116 return do_long_3d(s, a, opfn[a->size], NULL);
2117}
2118
2119static bool trans_VABAL_S_3d(DisasContext *s, arg_3diff *a)
2120{
2121 static NeonGenTwoOpWidenFn * const opfn[] = {
2122 gen_helper_neon_abdl_s16,
2123 gen_helper_neon_abdl_s32,
2124 gen_helper_neon_abdl_s64,
2125 NULL,
2126 };
2127 static NeonGenTwo64OpFn * const addfn[] = {
2128 gen_helper_neon_addl_u16,
2129 gen_helper_neon_addl_u32,
2130 tcg_gen_add_i64,
2131 NULL,
2132 };
2133
2134 return do_long_3d(s, a, opfn[a->size], addfn[a->size]);
2135}
2136
2137static bool trans_VABAL_U_3d(DisasContext *s, arg_3diff *a)
2138{
2139 static NeonGenTwoOpWidenFn * const opfn[] = {
2140 gen_helper_neon_abdl_u16,
2141 gen_helper_neon_abdl_u32,
2142 gen_helper_neon_abdl_u64,
2143 NULL,
2144 };
2145 static NeonGenTwo64OpFn * const addfn[] = {
2146 gen_helper_neon_addl_u16,
2147 gen_helper_neon_addl_u32,
2148 tcg_gen_add_i64,
2149 NULL,
2150 };
2151
2152 return do_long_3d(s, a, opfn[a->size], addfn[a->size]);
2153}
3a1d9eb0
PM
2154
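/*
 * 32x32->64 multiplies for the size == 2 long operations: muls2/mulu2
 * produce the low and high halves of the product separately, which
 * are then concatenated into a single 64-bit result.
 */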
2155static void gen_mull_s32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
2156{
2157 TCGv_i32 lo = tcg_temp_new_i32();
2158 TCGv_i32 hi = tcg_temp_new_i32();
2159
2160 tcg_gen_muls2_i32(lo, hi, rn, rm);
2161 tcg_gen_concat_i32_i64(rd, lo, hi);
2162
2163 tcg_temp_free_i32(lo);
2164 tcg_temp_free_i32(hi);
2165}
2166
2167static void gen_mull_u32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
2168{
2169 TCGv_i32 lo = tcg_temp_new_i32();
2170 TCGv_i32 hi = tcg_temp_new_i32();
2171
2172 tcg_gen_mulu2_i32(lo, hi, rn, rm);
2173 tcg_gen_concat_i32_i64(rd, lo, hi);
2174
2175 tcg_temp_free_i32(lo);
2176 tcg_temp_free_i32(hi);
2177}
2178
2179static bool trans_VMULL_S_3d(DisasContext *s, arg_3diff *a)
2180{
2181 static NeonGenTwoOpWidenFn * const opfn[] = {
2182 gen_helper_neon_mull_s8,
2183 gen_helper_neon_mull_s16,
2184 gen_mull_s32,
2185 NULL,
2186 };
2187
2188 return do_long_3d(s, a, opfn[a->size], NULL);
2189}
2190
2191static bool trans_VMULL_U_3d(DisasContext *s, arg_3diff *a)
2192{
2193 static NeonGenTwoOpWidenFn * const opfn[] = {
2194 gen_helper_neon_mull_u8,
2195 gen_helper_neon_mull_u16,
2196 gen_mull_u32,
2197 NULL,
2198 };
2199
2200 return do_long_3d(s, a, opfn[a->size], NULL);
2201}
2202
2203#define DO_VMLAL(INSN,MULL,ACC) \
2204 static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
2205 { \
2206 static NeonGenTwoOpWidenFn * const opfn[] = { \
2207 gen_helper_neon_##MULL##8, \
2208 gen_helper_neon_##MULL##16, \
2209 gen_##MULL##32, \
2210 NULL, \
2211 }; \
2212 static NeonGenTwo64OpFn * const accfn[] = { \
2213 gen_helper_neon_##ACC##l_u16, \
2214 gen_helper_neon_##ACC##l_u32, \
2215 tcg_gen_##ACC##_i64, \
2216 NULL, \
2217 }; \
2218 return do_long_3d(s, a, opfn[a->size], accfn[a->size]); \
2219 }
2220
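/*
 * The accumulate step is a lane-wise wrapping add or sub of the
 * double-width elements, which is sign-agnostic, so the signed and
 * unsigned VMLAL/VMLSL forms can share the same _u16/_u32 helpers.
 */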
2221DO_VMLAL(VMLAL_S,mull_s,add)
2222DO_VMLAL(VMLAL_U,mull_u,add)
2223DO_VMLAL(VMLSL_S,mull_s,sub)
2224DO_VMLAL(VMLSL_U,mull_u,sub)
9546ca59
PM
2225
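/*
 * VQDMULL: saturating doubling multiply, implemented as a widening
 * multiply followed by a saturating add of the product to itself.
 */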
2226static void gen_VQDMULL_16(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
2227{
2228 gen_helper_neon_mull_s16(rd, rn, rm);
2229 gen_helper_neon_addl_saturate_s32(rd, cpu_env, rd, rd);
2230}
2231
2232static void gen_VQDMULL_32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
2233{
2234 gen_mull_s32(rd, rn, rm);
2235 gen_helper_neon_addl_saturate_s64(rd, cpu_env, rd, rd);
2236}
2237
2238static bool trans_VQDMULL_3d(DisasContext *s, arg_3diff *a)
2239{
2240 static NeonGenTwoOpWidenFn * const opfn[] = {
2241 NULL,
2242 gen_VQDMULL_16,
2243 gen_VQDMULL_32,
2244 NULL,
2245 };
2246
2247 return do_long_3d(s, a, opfn[a->size], NULL);
2248}
2249
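/* For VQDMLAL the accumulate step is itself a saturating addition. */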
2250static void gen_VQDMLAL_acc_16(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
2251{
2252 gen_helper_neon_addl_saturate_s32(rd, cpu_env, rn, rm);
2253}
2254
2255static void gen_VQDMLAL_acc_32(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
2256{
2257 gen_helper_neon_addl_saturate_s64(rd, cpu_env, rn, rm);
2258}
2259
2260static bool trans_VQDMLAL_3d(DisasContext *s, arg_3diff *a)
2261{
2262 static NeonGenTwoOpWidenFn * const opfn[] = {
2263 NULL,
2264 gen_VQDMULL_16,
2265 gen_VQDMULL_32,
2266 NULL,
2267 };
2268 static NeonGenTwo64OpFn * const accfn[] = {
2269 NULL,
2270 gen_VQDMLAL_acc_16,
2271 gen_VQDMLAL_acc_32,
2272 NULL,
2273 };
2274
2275 return do_long_3d(s, a, opfn[a->size], accfn[a->size]);
2276}
2277
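/*
 * VQDMLSL negates the fresh product and reuses the saturating add
 * helpers, so it shares all of its machinery with VQDMLAL.
 */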
2278static void gen_VQDMLSL_acc_16(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
2279{
2280 gen_helper_neon_negl_u32(rm, rm);
2281 gen_helper_neon_addl_saturate_s32(rd, cpu_env, rn, rm);
2282}
2283
2284static void gen_VQDMLSL_acc_32(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
2285{
2286 tcg_gen_neg_i64(rm, rm);
2287 gen_helper_neon_addl_saturate_s64(rd, cpu_env, rn, rm);
2288}
2289
2290static bool trans_VQDMLSL_3d(DisasContext *s, arg_3diff *a)
2291{
2292 static NeonGenTwoOpWidenFn * const opfn[] = {
2293 NULL,
2294 gen_VQDMULL_16,
2295 gen_VQDMULL_32,
2296 NULL,
2297 };
2298 static NeonGenTwo64OpFn * const accfn[] = {
2299 NULL,
2300 gen_VQDMLSL_acc_16,
2301 gen_VQDMLSL_acc_32,
2302 NULL,
2303 };
2304
2305 return do_long_3d(s, a, opfn[a->size], accfn[a->size]);
2306}
18fb58d5
PM
2307
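/*
 * VMULL with polynomial types: the 8-bit form is part of base Neon,
 * while the 64-bit form is only available with the PMULL feature
 * (hence the extra aa32_pmull check); size == 1 has no polynomial
 * form and is rejected.
 */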
2308static bool trans_VMULL_P_3d(DisasContext *s, arg_3diff *a)
2309{
2310 gen_helper_gvec_3 *fn_gvec;
2311
2312 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
2313 return false;
2314 }
2315
2316 /* UNDEF accesses to D16-D31 if they don't exist. */
2317 if (!dc_isar_feature(aa32_simd_r32, s) &&
2318 ((a->vd | a->vn | a->vm) & 0x10)) {
2319 return false;
2320 }
2321
2322 if (a->vd & 1) {
2323 return false;
2324 }
2325
2326 switch (a->size) {
2327 case 0:
2328 fn_gvec = gen_helper_neon_pmull_h;
2329 break;
2330 case 2:
2331 if (!dc_isar_feature(aa32_pmull, s)) {
2332 return false;
2333 }
2334 fn_gvec = gen_helper_gvec_pmull_q;
2335 break;
2336 default:
2337 return false;
2338 }
2339
2340 if (!vfp_access_check(s)) {
2341 return true;
2342 }
2343
2344 tcg_gen_gvec_3_ool(neon_reg_offset(a->vd, 0),
2345 neon_reg_offset(a->vn, 0),
2346 neon_reg_offset(a->vm, 0),
2347 16, 16, 0, fn_gvec);
2348 return true;
2349}