/*
 * Generic vector operation expansion
 *
 * Copyright (c) 2018 Linaro
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "tcg.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "tcg-gvec-desc.h"

#define MAX_UNROLL  4

/* Verify vector size and alignment rules.  OFS should be the OR of all
   of the operand offsets so that we can check them all at once.  */
static void check_size_align(uint32_t oprsz, uint32_t maxsz, uint32_t ofs)
{
    uint32_t opr_align = oprsz >= 16 ? 15 : 7;
    uint32_t max_align = maxsz >= 16 || oprsz >= 16 ? 15 : 7;
    tcg_debug_assert(oprsz > 0);
    tcg_debug_assert(oprsz <= maxsz);
    tcg_debug_assert((oprsz & opr_align) == 0);
    tcg_debug_assert((maxsz & max_align) == 0);
    tcg_debug_assert((ofs & max_align) == 0);
}

/* Verify vector overlap rules for two operands.  */
static void check_overlap_2(uint32_t d, uint32_t a, uint32_t s)
{
    tcg_debug_assert(d == a || d + s <= a || a + s <= d);
}

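/*
 * Illustrative note (not in the original source): the assertion above
 * accepts two operands only if they are identical or fully disjoint.
 * For example, with s == 16, d == 0x100 and a == 0x110 are disjoint and
 * d == a == 0x100 is the legal in-place case, while a partial overlap
 * such as d == 0x100, a == 0x108 would fire the assertion.
 */
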
/* Verify vector overlap rules for three operands.  */
static void check_overlap_3(uint32_t d, uint32_t a, uint32_t b, uint32_t s)
{
    check_overlap_2(d, a, s);
    check_overlap_2(d, b, s);
    check_overlap_2(a, b, s);
}

/* Verify vector overlap rules for four operands.  */
static void check_overlap_4(uint32_t d, uint32_t a, uint32_t b,
                            uint32_t c, uint32_t s)
{
    check_overlap_2(d, a, s);
    check_overlap_2(d, b, s);
    check_overlap_2(d, c, s);
    check_overlap_2(a, b, s);
    check_overlap_2(a, c, s);
    check_overlap_2(b, c, s);
}

/* Create a descriptor from components.  */
uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data)
{
    uint32_t desc = 0;

    assert(oprsz % 8 == 0 && oprsz <= (8 << SIMD_OPRSZ_BITS));
    assert(maxsz % 8 == 0 && maxsz <= (8 << SIMD_MAXSZ_BITS));
    assert(data == sextract32(data, 0, SIMD_DATA_BITS));

    oprsz = (oprsz / 8) - 1;
    maxsz = (maxsz / 8) - 1;
    desc = deposit32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS, oprsz);
    desc = deposit32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS, maxsz);
    desc = deposit32(desc, SIMD_DATA_SHIFT, SIMD_DATA_BITS, data);

    return desc;
}

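/*
 * Worked example (illustrative, not part of the original source): the
 * sizes are stored divided by 8 and minus 1, so oprsz == 16 and
 * maxsz == 32 are encoded as the field values 1 and 3.  The helper side
 * recovers them with simd_oprsz()/simd_maxsz() and fetches the signed
 * DATA field with simd_data(), per tcg-gvec-desc.h.
 */
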
/* Generate a call to a gvec-style helper with two vector operands.  */
void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
                        uint32_t oprsz, uint32_t maxsz, int32_t data,
                        gen_helper_gvec_2 *fn)
{
    TCGv_ptr a0, a1;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);

    fn(a0, a1, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with two vector operands
   and one scalar operand.  */
void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
                         uint32_t oprsz, uint32_t maxsz, int32_t data,
                         gen_helper_gvec_2i *fn)
{
    TCGv_ptr a0, a1;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);

    fn(a0, a1, c, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with three vector operands.  */
void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t oprsz, uint32_t maxsz, int32_t data,
                        gen_helper_gvec_3 *fn)
{
    TCGv_ptr a0, a1, a2;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);

    fn(a0, a1, a2, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with four vector operands.  */
void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
                        int32_t data, gen_helper_gvec_4 *fn)
{
    TCGv_ptr a0, a1, a2, a3;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();
    a3 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);
    tcg_gen_addi_ptr(a3, cpu_env, cofs);

    fn(a0, a1, a2, a3, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_ptr(a3);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with five vector operands.  */
void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, uint32_t xofs, uint32_t oprsz,
                        uint32_t maxsz, int32_t data, gen_helper_gvec_5 *fn)
{
    TCGv_ptr a0, a1, a2, a3, a4;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();
    a3 = tcg_temp_new_ptr();
    a4 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);
    tcg_gen_addi_ptr(a3, cpu_env, cofs);
    tcg_gen_addi_ptr(a4, cpu_env, xofs);

    fn(a0, a1, a2, a3, a4, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_ptr(a3);
    tcg_temp_free_ptr(a4);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with two vector operands
   and an extra pointer operand.  */
void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs,
                        TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
                        int32_t data, gen_helper_gvec_2_ptr *fn)
{
    TCGv_ptr a0, a1;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);

    fn(a0, a1, ptr, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with three vector operands
   and an extra pointer operand.  */
void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
                        int32_t data, gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr a0, a1, a2;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);

    fn(a0, a1, a2, ptr, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with four vector operands
   and an extra pointer operand.  */
void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, TCGv_ptr ptr, uint32_t oprsz,
                        uint32_t maxsz, int32_t data,
                        gen_helper_gvec_4_ptr *fn)
{
    TCGv_ptr a0, a1, a2, a3;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();
    a3 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);
    tcg_gen_addi_ptr(a3, cpu_env, cofs);

    fn(a0, a1, a2, a3, ptr, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_ptr(a3);
    tcg_temp_free_i32(desc);
}

/* Return true if we want to implement something of OPRSZ bytes
   in units of LNSZ.  This limits the expansion of inline code.  */
static inline bool check_size_impl(uint32_t oprsz, uint32_t lnsz)
{
    if (oprsz % lnsz == 0) {
        uint32_t lnct = oprsz / lnsz;
        return lnct >= 1 && lnct <= MAX_UNROLL;
    }
    return false;
}

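/*
 * Illustrative note (not in the original source): with MAX_UNROLL == 4,
 * oprsz == 32 in units of lnsz == 8 needs 4 iterations and is accepted,
 * while oprsz == 64 in units of 8 would need 8 and is rejected, as is
 * any oprsz that is not a multiple of the unit size.
 */
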
static void expand_clr(uint32_t dofs, uint32_t maxsz);

/* Duplicate C as per VECE.  */
uint64_t (dup_const)(unsigned vece, uint64_t c)
{
    switch (vece) {
    case MO_8:
        return 0x0101010101010101ull * (uint8_t)c;
    case MO_16:
        return 0x0001000100010001ull * (uint16_t)c;
    case MO_32:
        return 0x0000000100000001ull * (uint32_t)c;
    case MO_64:
        return c;
    default:
        g_assert_not_reached();
    }
}

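/*
 * Worked examples (illustrative, not part of the original source):
 *   dup_const(MO_8,  0x3c)       == 0x3c3c3c3c3c3c3c3c
 *   dup_const(MO_16, 0x1234)     == 0x1234123412341234
 *   dup_const(MO_32, 0xdeadbeef) == 0xdeadbeefdeadbeef
 * Multiplying the truncated element by 0x0101...01 and friends
 * replicates it across every lane of the 64-bit result.
 */
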
/* Duplicate IN into OUT as per VECE.  */
static void gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in)
{
    switch (vece) {
    case MO_8:
        tcg_gen_ext8u_i32(out, in);
        tcg_gen_muli_i32(out, out, 0x01010101);
        break;
    case MO_16:
        tcg_gen_deposit_i32(out, in, in, 16, 16);
        break;
    case MO_32:
        tcg_gen_mov_i32(out, in);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in)
{
    switch (vece) {
    case MO_8:
        tcg_gen_ext8u_i64(out, in);
        tcg_gen_muli_i64(out, out, 0x0101010101010101ull);
        break;
    case MO_16:
        tcg_gen_ext16u_i64(out, in);
        tcg_gen_muli_i64(out, out, 0x0001000100010001ull);
        break;
    case MO_32:
        tcg_gen_deposit_i64(out, in, in, 32, 32);
        break;
    case MO_64:
        tcg_gen_mov_i64(out, in);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Select a supported vector type for implementing an operation on SIZE
 * bytes.  If OP is 0, assume that the real operation to be performed is
 * required by all backends.  Otherwise, make sure that OP can be performed
 * on elements of size VECE in the selected type.  Do not select V64 if
 * PREFER_I64 is true.  Return 0 if no vector type is selected.
 */
static TCGType choose_vector_type(TCGOpcode op, unsigned vece, uint32_t size,
                                  bool prefer_i64)
{
    if (TCG_TARGET_HAS_v256 && check_size_impl(size, 32)) {
        if (op == 0) {
            return TCG_TYPE_V256;
        }
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         * It is hard to imagine a case in which v256 is supported
         * but v128 is not, but check anyway.
         */
        if (tcg_can_emit_vec_op(op, TCG_TYPE_V256, vece)
            && (size % 32 == 0
                || tcg_can_emit_vec_op(op, TCG_TYPE_V128, vece))) {
            return TCG_TYPE_V256;
        }
    }
    if (TCG_TARGET_HAS_v128 && check_size_impl(size, 16)
        && (op == 0 || tcg_can_emit_vec_op(op, TCG_TYPE_V128, vece))) {
        return TCG_TYPE_V128;
    }
    if (TCG_TARGET_HAS_v64 && !prefer_i64 && check_size_impl(size, 8)
        && (op == 0 || tcg_can_emit_vec_op(op, TCG_TYPE_V64, vece))) {
        return TCG_TYPE_V64;
    }
    return 0;
}

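/*
 * Illustrative note (not in the original source): the selection order is
 * V256, then V128, then V64.  A type is chosen only when OPRSZ divides
 * into an acceptable number of lines of that width (check_size_impl)
 * and, for op != 0, the backend reports that it can emit OP at that
 * width via tcg_can_emit_vec_op().
 */
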
/* Set OPRSZ bytes at DOFS to replications of IN_32, IN_64 or IN_C.
 * Only one of IN_32 or IN_64 may be set;
 * IN_C is used if IN_32 and IN_64 are unset.
 */
static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
                   uint32_t maxsz, TCGv_i32 in_32, TCGv_i64 in_64,
                   uint64_t in_c)
{
    TCGType type;
    TCGv_i64 t_64;
    TCGv_i32 t_32, t_desc;
    TCGv_ptr t_ptr;
    uint32_t i;

    assert(vece <= (in_32 ? MO_32 : MO_64));
    assert(in_32 == NULL || in_64 == NULL);

    /* If we're storing 0, expand oprsz to maxsz.  */
    if (in_32 == NULL && in_64 == NULL) {
        in_c = dup_const(vece, in_c);
        if (in_c == 0) {
            oprsz = maxsz;
        }
    }

    /* Implement inline with a vector type, if possible.
     * Prefer integer when 64-bit host and no variable dup.
     */
    type = choose_vector_type(0, vece, oprsz,
                              (TCG_TARGET_REG_BITS == 64 && in_32 == NULL
                               && (in_64 == NULL || vece == MO_64)));
    if (type != 0) {
        TCGv_vec t_vec = tcg_temp_new_vec(type);

        if (in_32) {
            tcg_gen_dup_i32_vec(vece, t_vec, in_32);
        } else if (in_64) {
            tcg_gen_dup_i64_vec(vece, t_vec, in_64);
        } else {
            switch (vece) {
            case MO_8:
                tcg_gen_dup8i_vec(t_vec, in_c);
                break;
            case MO_16:
                tcg_gen_dup16i_vec(t_vec, in_c);
                break;
            case MO_32:
                tcg_gen_dup32i_vec(t_vec, in_c);
                break;
            default:
                tcg_gen_dup64i_vec(t_vec, in_c);
                break;
            }
        }

        i = 0;
        switch (type) {
        case TCG_TYPE_V256:
            /* Recall that ARM SVE allows vector sizes that are not a
             * power of 2, but always a multiple of 16.  The intent is
             * that e.g. size == 80 would be expanded with 2x32 + 1x16.
             */
            for (; i + 32 <= oprsz; i += 32) {
                tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V256);
            }
            /* fallthru */
        case TCG_TYPE_V128:
            for (; i + 16 <= oprsz; i += 16) {
                tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V128);
            }
            break;
        case TCG_TYPE_V64:
            for (; i < oprsz; i += 8) {
                tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V64);
            }
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_vec(t_vec);
        goto done;
    }

    /* Otherwise, inline with an integer type, unless "large".  */
    if (check_size_impl(oprsz, TCG_TARGET_REG_BITS / 8)) {
        t_64 = NULL;
        t_32 = NULL;

        if (in_32) {
            /* We are given a 32-bit variable input.  For a 64-bit host,
               use a 64-bit operation unless the 32-bit operation would
               be simple enough.  */
            if (TCG_TARGET_REG_BITS == 64
                && (vece != MO_32 || !check_size_impl(oprsz, 4))) {
                t_64 = tcg_temp_new_i64();
                tcg_gen_extu_i32_i64(t_64, in_32);
                gen_dup_i64(vece, t_64, t_64);
            } else {
                t_32 = tcg_temp_new_i32();
                gen_dup_i32(vece, t_32, in_32);
            }
        } else if (in_64) {
            /* We are given a 64-bit variable input.  */
            t_64 = tcg_temp_new_i64();
            gen_dup_i64(vece, t_64, in_64);
        } else {
            /* We are given a constant input.  */
            /* For 64-bit hosts, use 64-bit constants for "simple" constants
               or when we'd need too many 32-bit stores, or when a 64-bit
               constant is really required.  */
            if (vece == MO_64
                || (TCG_TARGET_REG_BITS == 64
                    && (in_c == 0 || in_c == -1
                        || !check_size_impl(oprsz, 4)))) {
                t_64 = tcg_const_i64(in_c);
            } else {
                t_32 = tcg_const_i32(in_c);
            }
        }

        /* Implement inline if we picked an implementation size above.  */
        if (t_32) {
            for (i = 0; i < oprsz; i += 4) {
                tcg_gen_st_i32(t_32, cpu_env, dofs + i);
            }
            tcg_temp_free_i32(t_32);
            goto done;
        }
        if (t_64) {
            for (i = 0; i < oprsz; i += 8) {
                tcg_gen_st_i64(t_64, cpu_env, dofs + i);
            }
            tcg_temp_free_i64(t_64);
            goto done;
        }
    }

    /* Otherwise implement out of line.  */
    t_ptr = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(t_ptr, cpu_env, dofs);
    t_desc = tcg_const_i32(simd_desc(oprsz, maxsz, 0));

    if (vece == MO_64) {
        if (in_64) {
            gen_helper_gvec_dup64(t_ptr, t_desc, in_64);
        } else {
            t_64 = tcg_const_i64(in_c);
            gen_helper_gvec_dup64(t_ptr, t_desc, t_64);
            tcg_temp_free_i64(t_64);
        }
    } else {
        typedef void dup_fn(TCGv_ptr, TCGv_i32, TCGv_i32);
        static dup_fn * const fns[3] = {
            gen_helper_gvec_dup8,
            gen_helper_gvec_dup16,
            gen_helper_gvec_dup32
        };

        if (in_32) {
            fns[vece](t_ptr, t_desc, in_32);
        } else {
            t_32 = tcg_temp_new_i32();
            if (in_64) {
                tcg_gen_extrl_i64_i32(t_32, in_64);
            } else if (vece == MO_8) {
                tcg_gen_movi_i32(t_32, in_c & 0xff);
            } else if (vece == MO_16) {
                tcg_gen_movi_i32(t_32, in_c & 0xffff);
            } else {
                tcg_gen_movi_i32(t_32, in_c);
            }
            fns[vece](t_ptr, t_desc, t_32);
            tcg_temp_free_i32(t_32);
        }
    }

    tcg_temp_free_ptr(t_ptr);
    tcg_temp_free_i32(t_desc);
    return;

 done:
    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

/* Likewise, but with zero.  */
static void expand_clr(uint32_t dofs, uint32_t maxsz)
{
    do_dup(MO_8, dofs, maxsz, maxsz, NULL, NULL, 0);
}

/* Expand OPRSZ bytes worth of two-operand operations using i32 elements.  */
static void expand_2_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                         void (*fni)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        fni(t0, t0);
        tcg_gen_st_i32(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
}

static void expand_2i_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          int32_t c, bool load_dest,
                          void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_i32(t1, cpu_env, dofs + i);
        }
        fni(t1, t0, c);
        tcg_gen_st_i32(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}

static void expand_2s_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          TCGv_i32 c, bool scalar_first,
                          void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        if (scalar_first) {
            fni(t1, c, t0);
        } else {
            fni(t1, t0, c);
        }
        tcg_gen_st_i32(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}

/* Expand OPRSZ bytes worth of three-operand operations using i32 elements.  */
static void expand_3_i32(uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t oprsz, bool load_dest,
                         void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        tcg_gen_ld_i32(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i32(t2, cpu_env, dofs + i);
        }
        fni(t2, t0, t1);
        tcg_gen_st_i32(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
}

static void expand_3i_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                          uint32_t oprsz, int32_t c, bool load_dest,
                          void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        tcg_gen_ld_i32(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i32(t2, cpu_env, dofs + i);
        }
        fni(t2, t0, t1, c);
        tcg_gen_st_i32(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

/* Expand OPRSZ bytes worth of four-operand operations using i32 elements.  */
static void expand_4_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         uint32_t cofs, uint32_t oprsz, bool write_aofs,
                         void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t1, cpu_env, aofs + i);
        tcg_gen_ld_i32(t2, cpu_env, bofs + i);
        tcg_gen_ld_i32(t3, cpu_env, cofs + i);
        fni(t0, t1, t2, t3);
        tcg_gen_st_i32(t0, cpu_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_i32(t1, cpu_env, aofs + i);
        }
    }
    tcg_temp_free_i32(t3);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
}

/* Expand OPRSZ bytes worth of two-operand operations using i64 elements.  */
static void expand_2_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                         void (*fni)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        fni(t0, t0);
        tcg_gen_st_i64(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
}

static void expand_2i_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          int64_t c, bool load_dest,
                          void (*fni)(TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(t1, cpu_env, dofs + i);
        }
        fni(t1, t0, c);
        tcg_gen_st_i64(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

static void expand_2s_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          TCGv_i64 c, bool scalar_first,
                          void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        if (scalar_first) {
            fni(t1, c, t0);
        } else {
            fni(t1, t0, c);
        }
        tcg_gen_st_i64(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* Expand OPRSZ bytes worth of three-operand operations using i64 elements.  */
static void expand_3_i64(uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t oprsz, bool load_dest,
                         void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        tcg_gen_ld_i64(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(t2, cpu_env, dofs + i);
        }
        fni(t2, t0, t1);
        tcg_gen_st_i64(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t0);
}

static void expand_3i_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                          uint32_t oprsz, int64_t c, bool load_dest,
                          void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        tcg_gen_ld_i64(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(t2, cpu_env, dofs + i);
        }
        fni(t2, t0, t1, c);
        tcg_gen_st_i64(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

/* Expand OPRSZ bytes worth of four-operand operations using i64 elements.  */
static void expand_4_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         uint32_t cofs, uint32_t oprsz, bool write_aofs,
                         void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t1, cpu_env, aofs + i);
        tcg_gen_ld_i64(t2, cpu_env, bofs + i);
        tcg_gen_ld_i64(t3, cpu_env, cofs + i);
        fni(t0, t1, t2, t3);
        tcg_gen_st_i64(t0, cpu_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_i64(t1, cpu_env, aofs + i);
        }
    }
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t0);
}

/* Expand OPRSZ bytes worth of two-operand operations using host vectors.  */
static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t oprsz, uint32_t tysz, TCGType type,
                         void (*fni)(unsigned, TCGv_vec, TCGv_vec))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        fni(vece, t0, t0);
        tcg_gen_st_vec(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
}

/* Expand OPRSZ bytes worth of two-vector operands and an immediate operand
   using host vectors.  */
static void expand_2i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t oprsz, uint32_t tysz, TCGType type,
                          int64_t c, bool load_dest,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec, int64_t))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_vec(t1, cpu_env, dofs + i);
        }
        fni(vece, t1, t0, c);
        tcg_gen_st_vec(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
    tcg_temp_free_vec(t1);
}

static void expand_2s_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t oprsz, uint32_t tysz, TCGType type,
                          TCGv_vec c, bool scalar_first,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        if (scalar_first) {
            fni(vece, t1, c, t0);
        } else {
            fni(vece, t1, t0, c);
        }
        tcg_gen_st_vec(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
    tcg_temp_free_vec(t1);
}

/* Expand OPRSZ bytes worth of three-operand operations using host vectors.  */
static void expand_3_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t oprsz,
                         uint32_t tysz, TCGType type, bool load_dest,
                         void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        tcg_gen_ld_vec(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_vec(t2, cpu_env, dofs + i);
        }
        fni(vece, t2, t0, t1);
        tcg_gen_st_vec(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t2);
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t0);
}

/*
 * Expand OPRSZ bytes worth of three-vector operands and an immediate operand
 * using host vectors.
 */
static void expand_3i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t bofs, uint32_t oprsz, uint32_t tysz,
                          TCGType type, int64_t c, bool load_dest,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec,
                                      int64_t))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        tcg_gen_ld_vec(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_vec(t2, cpu_env, dofs + i);
        }
        fni(vece, t2, t0, t1, c);
        tcg_gen_st_vec(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t2);
}

/* Expand OPRSZ bytes worth of four-operand operations using host vectors.  */
static void expand_4_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t cofs, uint32_t oprsz,
                         uint32_t tysz, TCGType type, bool write_aofs,
                         void (*fni)(unsigned, TCGv_vec, TCGv_vec,
                                     TCGv_vec, TCGv_vec))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    TCGv_vec t3 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t1, cpu_env, aofs + i);
        tcg_gen_ld_vec(t2, cpu_env, bofs + i);
        tcg_gen_ld_vec(t3, cpu_env, cofs + i);
        fni(vece, t0, t1, t2, t3);
        tcg_gen_st_vec(t0, cpu_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_vec(t1, cpu_env, aofs + i);
        }
    }
    tcg_temp_free_vec(t3);
    tcg_temp_free_vec(t2);
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t0);
}

/* Expand a vector two-operand operation.  */
void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
                    uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
{
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_2_i64(dofs, aofs, oprsz, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_2_i32(dofs, aofs, oprsz, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno);
            return;
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

/* Expand a vector operation with two vectors and an immediate.  */
void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                     uint32_t maxsz, int64_t c, const GVecGen2i *g)
{
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_2i_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
                      c, g->load_dest, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_2i_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
                      c, g->load_dest, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_2i_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
                      c, g->load_dest, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_2i_i64(dofs, aofs, oprsz, c, g->load_dest, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_2i_i32(dofs, aofs, oprsz, c, g->load_dest, g->fni4);
        } else {
            if (g->fno) {
                tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, c, g->fno);
            } else {
                TCGv_i64 tcg_c = tcg_const_i64(c);
                tcg_gen_gvec_2i_ool(dofs, aofs, tcg_c, oprsz,
                                    maxsz, c, g->fnoi);
                tcg_temp_free_i64(tcg_c);
            }
            return;
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

/* Expand a vector operation with two vectors and a scalar.  */
void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                     uint32_t maxsz, TCGv_i64 c, const GVecGen2s *g)
{
    TCGType type;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
    }
    if (type != 0) {
        TCGv_vec t_vec = tcg_temp_new_vec(type);
        uint32_t some;

        tcg_gen_dup_i64_vec(g->vece, t_vec, c);

        switch (type) {
        case TCG_TYPE_V256:
            /* Recall that ARM SVE allows vector sizes that are not a
             * power of 2, but always a multiple of 16.  The intent is
             * that e.g. size == 80 would be expanded with 2x32 + 1x16.
             */
            some = QEMU_ALIGN_DOWN(oprsz, 32);
            expand_2s_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
                          t_vec, g->scalar_first, g->fniv);
            if (some == oprsz) {
                break;
            }
            dofs += some;
            aofs += some;
            oprsz -= some;
            maxsz -= some;
            /* fallthru */

        case TCG_TYPE_V128:
            expand_2s_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
                          t_vec, g->scalar_first, g->fniv);
            break;

        case TCG_TYPE_V64:
            expand_2s_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
                          t_vec, g->scalar_first, g->fniv);
            break;

        default:
            g_assert_not_reached();
        }
        tcg_temp_free_vec(t_vec);
    } else if (g->fni8 && check_size_impl(oprsz, 8)) {
        TCGv_i64 t64 = tcg_temp_new_i64();

        gen_dup_i64(g->vece, t64, c);
        expand_2s_i64(dofs, aofs, oprsz, t64, g->scalar_first, g->fni8);
        tcg_temp_free_i64(t64);
    } else if (g->fni4 && check_size_impl(oprsz, 4)) {
        TCGv_i32 t32 = tcg_temp_new_i32();

        tcg_gen_extrl_i64_i32(t32, c);
        gen_dup_i32(g->vece, t32, t32);
        expand_2s_i32(dofs, aofs, oprsz, t32, g->scalar_first, g->fni4);
        tcg_temp_free_i32(t32);
    } else {
        tcg_gen_gvec_2i_ool(dofs, aofs, c, oprsz, maxsz, 0, g->fno);
        return;
    }

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

/* Expand a vector three-operand operation.  */
void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                    uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
{
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs);
    check_overlap_3(dofs, aofs, bofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_3_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
                     g->load_dest, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
                     g->load_dest, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
                     g->load_dest, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_3_i64(dofs, aofs, bofs, oprsz, g->load_dest, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_3_i32(dofs, aofs, bofs, oprsz, g->load_dest, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz,
                               maxsz, g->data, g->fno);
            return;
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

/* Expand a vector operation with three vectors and an immediate.  */
void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                     uint32_t oprsz, uint32_t maxsz, int64_t c,
                     const GVecGen3i *g)
{
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs);
    check_overlap_3(dofs, aofs, bofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /*
         * Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_3i_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
                      c, g->load_dest, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_3i_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
                      c, g->load_dest, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_3i_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
                      c, g->load_dest, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_3i_i64(dofs, aofs, bofs, oprsz, c, g->load_dest, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_3i_i32(dofs, aofs, bofs, oprsz, c, g->load_dest, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, c, g->fno);
            return;
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

/* Expand a vector four-operand operation.  */
void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
                    uint32_t oprsz, uint32_t maxsz, const GVecGen4 *g)
{
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs);
    check_overlap_4(dofs, aofs, bofs, cofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_4_vec(g->vece, dofs, aofs, bofs, cofs, some,
                     32, TCG_TYPE_V256, g->write_aofs, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        cofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_4_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
                     16, TCG_TYPE_V128, g->write_aofs, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_4_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
                     8, TCG_TYPE_V64, g->write_aofs, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_4_i64(dofs, aofs, bofs, cofs, oprsz,
                         g->write_aofs, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_4_i32(dofs, aofs, bofs, cofs, oprsz,
                         g->write_aofs, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_4_ool(dofs, aofs, bofs, cofs,
                               oprsz, maxsz, g->data, g->fno);
            return;
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

/*
 * Expand specific vector operations.
 */

static void vec_mov2(unsigned vece, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mov_vec(a, b);
}

void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2 g = {
        .fni8 = tcg_gen_mov_i64,
        .fniv = vec_mov2,
        .fno = gen_helper_gvec_mov,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    if (dofs != aofs) {
        tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);
    } else {
        check_size_align(oprsz, maxsz, dofs);
        if (oprsz < maxsz) {
            expand_clr(dofs + oprsz, maxsz - oprsz);
        }
    }
}

void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t oprsz,
                          uint32_t maxsz, TCGv_i32 in)
{
    check_size_align(oprsz, maxsz, dofs);
    tcg_debug_assert(vece <= MO_32);
    do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
}

void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t oprsz,
                          uint32_t maxsz, TCGv_i64 in)
{
    check_size_align(oprsz, maxsz, dofs);
    tcg_debug_assert(vece <= MO_64);
    do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
}

void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t oprsz, uint32_t maxsz)
{
    if (vece <= MO_32) {
        TCGv_i32 in = tcg_temp_new_i32();
        switch (vece) {
        case MO_8:
            tcg_gen_ld8u_i32(in, cpu_env, aofs);
            break;
        case MO_16:
            tcg_gen_ld16u_i32(in, cpu_env, aofs);
            break;
        default:
            tcg_gen_ld_i32(in, cpu_env, aofs);
            break;
        }
        tcg_gen_gvec_dup_i32(vece, dofs, oprsz, maxsz, in);
        tcg_temp_free_i32(in);
    } else if (vece == MO_64) {
        TCGv_i64 in = tcg_temp_new_i64();
        tcg_gen_ld_i64(in, cpu_env, aofs);
        tcg_gen_gvec_dup_i64(MO_64, dofs, oprsz, maxsz, in);
        tcg_temp_free_i64(in);
    } else {
        /* 128-bit duplicate.  */
        /* ??? Dup to 256-bit vector.  */
        int i;

        tcg_debug_assert(vece == 4);
        tcg_debug_assert(oprsz >= 16);
        if (TCG_TARGET_HAS_v128) {
            TCGv_vec in = tcg_temp_new_vec(TCG_TYPE_V128);

            tcg_gen_ld_vec(in, cpu_env, aofs);
            for (i = 0; i < oprsz; i += 16) {
                tcg_gen_st_vec(in, cpu_env, dofs + i);
            }
            tcg_temp_free_vec(in);
        } else {
            TCGv_i64 in0 = tcg_temp_new_i64();
            TCGv_i64 in1 = tcg_temp_new_i64();

            tcg_gen_ld_i64(in0, cpu_env, aofs);
            tcg_gen_ld_i64(in1, cpu_env, aofs + 8);
            for (i = 0; i < oprsz; i += 16) {
                tcg_gen_st_i64(in0, cpu_env, dofs + i);
                tcg_gen_st_i64(in1, cpu_env, dofs + i + 8);
            }
            tcg_temp_free_i64(in0);
            tcg_temp_free_i64(in1);
        }
    }
}

void tcg_gen_gvec_dup64i(uint32_t dofs, uint32_t oprsz,
                         uint32_t maxsz, uint64_t x)
{
    check_size_align(oprsz, maxsz, dofs);
    do_dup(MO_64, dofs, oprsz, maxsz, NULL, NULL, x);
}

void tcg_gen_gvec_dup32i(uint32_t dofs, uint32_t oprsz,
                         uint32_t maxsz, uint32_t x)
{
    check_size_align(oprsz, maxsz, dofs);
    do_dup(MO_32, dofs, oprsz, maxsz, NULL, NULL, x);
}

void tcg_gen_gvec_dup16i(uint32_t dofs, uint32_t oprsz,
                         uint32_t maxsz, uint16_t x)
{
    check_size_align(oprsz, maxsz, dofs);
    do_dup(MO_16, dofs, oprsz, maxsz, NULL, NULL, x);
}

void tcg_gen_gvec_dup8i(uint32_t dofs, uint32_t oprsz,
                        uint32_t maxsz, uint8_t x)
{
    check_size_align(oprsz, maxsz, dofs);
    do_dup(MO_8, dofs, oprsz, maxsz, NULL, NULL, x);
}

void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2 g = {
        .fni8 = tcg_gen_not_i64,
        .fniv = tcg_gen_not_vec,
        .fno = gen_helper_gvec_not,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);
}

/* Perform a vector addition using normal addition and a mask.  The mask
   should be the sign bit of each lane.  This 6-operation form is more
   efficient than separate additions when there are 4 or more lanes in
   the 64-bit operation.  */
static void gen_addv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_andc_i64(t1, a, m);
    tcg_gen_andc_i64(t2, b, m);
    tcg_gen_xor_i64(t3, a, b);
    tcg_gen_add_i64(d, t1, t2);
    tcg_gen_and_i64(t3, t3, m);
    tcg_gen_xor_i64(d, d, t3);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}

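/*
 * Illustrative walk-through (not in the original source): clearing the
 * sign bit of every lane in both inputs guarantees that a carry cannot
 * propagate across a lane boundary, since the top bit of each lane is 0
 * going into the add.  The add leaves the carry-in at each sign position;
 * xoring with (a ^ b) & m then adds back the operands' own sign bits.
 * E.g. for MO_8 lanes, 0xff + 0x01 gives 0x80 from the masked add, and
 * the final xor with (a ^ b) & m == 0x80 yields 0x00 in that lane
 * without disturbing the neighbouring byte.
 */
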
void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80));
    gen_addv_mask(d, a, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000));
    gen_addv_mask(d, a, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_andi_i64(t1, a, ~0xffffffffull);
    tcg_gen_add_i64(t2, a, b);
    tcg_gen_add_i64(t1, t1, b);
    tcg_gen_deposit_i64(d, t1, t2, 0, 32);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

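/*
 * Illustrative note (not in the original source): with only two 32-bit
 * lanes the mask trick is unnecessary.  t2 holds the full 64-bit sum,
 * whose low lane is already correct; t1 adds b to a with a's low lane
 * zeroed, so no carry can enter t1's high lane, which is therefore the
 * correct high-lane sum.  The deposit stitches t1's high lane onto
 * t2's low lane.
 */
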
void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fni8 = tcg_gen_vec_add8_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_add8,
          .opc = INDEX_op_add_vec,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_add16_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_add16,
          .opc = INDEX_op_add_vec,
          .vece = MO_16 },
        { .fni4 = tcg_gen_add_i32,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_add32,
          .opc = INDEX_op_add_vec,
          .vece = MO_32 },
        { .fni8 = tcg_gen_add_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_add64,
          .opc = INDEX_op_add_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_adds(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2s g[4] = {
        { .fni8 = tcg_gen_vec_add8_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_adds8,
          .opc = INDEX_op_add_vec,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_add16_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_adds16,
          .opc = INDEX_op_add_vec,
          .vece = MO_16 },
        { .fni4 = tcg_gen_add_i32,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_adds32,
          .opc = INDEX_op_add_vec,
          .vece = MO_32 },
        { .fni8 = tcg_gen_add_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_adds64,
          .opc = INDEX_op_add_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
}

void tcg_gen_gvec_addi(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(c);
    tcg_gen_gvec_adds(vece, dofs, aofs, tmp, oprsz, maxsz);
    tcg_temp_free_i64(tmp);
}

void tcg_gen_gvec_subs(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2s g[4] = {
        { .fni8 = tcg_gen_vec_sub8_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_subs8,
          .opc = INDEX_op_sub_vec,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_sub16_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_subs16,
          .opc = INDEX_op_sub_vec,
          .vece = MO_16 },
        { .fni4 = tcg_gen_sub_i32,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_subs32,
          .opc = INDEX_op_sub_vec,
          .vece = MO_32 },
        { .fni8 = tcg_gen_sub_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_subs64,
          .opc = INDEX_op_sub_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
}

/* Perform a vector subtraction using normal subtraction and a mask.
   Compare gen_addv_mask above.  */
static void gen_subv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_or_i64(t1, a, m);
    tcg_gen_andc_i64(t2, b, m);
    tcg_gen_eqv_i64(t3, a, b);
    tcg_gen_sub_i64(d, t1, t2);
    tcg_gen_and_i64(t3, t3, m);
    tcg_gen_xor_i64(d, d, t3);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}

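/*
 * Illustrative note (not in the original source): this is the dual of
 * the addition trick.  Forcing the sign bit of every lane of a to 1 and
 * clearing it in b ensures that no lane-wise subtraction can borrow
 * from the lane above; the sign bit of each result lane is then fixed
 * up with ~(a ^ b) & m, which eqv computes as a XNOR b masked to the
 * sign positions.
 */
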
void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80));
    gen_subv_mask(d, a, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000));
    gen_subv_mask(d, a, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_andi_i64(t1, b, ~0xffffffffull);
    tcg_gen_sub_i64(t2, a, b);
    tcg_gen_sub_i64(t1, a, t1);
    tcg_gen_deposit_i64(d, t1, t2, 0, 32);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fni8 = tcg_gen_vec_sub8_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_sub8,
          .opc = INDEX_op_sub_vec,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_sub16_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_sub16,
          .opc = INDEX_op_sub_vec,
          .vece = MO_16 },
        { .fni4 = tcg_gen_sub_i32,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_sub32,
          .opc = INDEX_op_sub_vec,
          .vece = MO_32 },
        { .fni8 = tcg_gen_sub_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_sub64,
          .opc = INDEX_op_sub_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_mul(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_mul8,
          .opc = INDEX_op_mul_vec,
          .vece = MO_8 },
        { .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_mul16,
          .opc = INDEX_op_mul_vec,
          .vece = MO_16 },
        { .fni4 = tcg_gen_mul_i32,
          .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_mul32,
          .opc = INDEX_op_mul_vec,
          .vece = MO_32 },
        { .fni8 = tcg_gen_mul_i64,
          .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_mul64,
          .opc = INDEX_op_mul_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_muls(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2s g[4] = {
        { .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_muls8,
          .opc = INDEX_op_mul_vec,
          .vece = MO_8 },
        { .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_muls16,
          .opc = INDEX_op_mul_vec,
          .vece = MO_16 },
        { .fni4 = tcg_gen_mul_i32,
          .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_muls32,
          .opc = INDEX_op_mul_vec,
          .vece = MO_32 },
        { .fni8 = tcg_gen_mul_i64,
          .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_muls64,
          .opc = INDEX_op_mul_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
}

void tcg_gen_gvec_muli(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(c);
    tcg_gen_gvec_muls(vece, dofs, aofs, tmp, oprsz, maxsz);
    tcg_temp_free_i64(tmp);
}

void tcg_gen_gvec_ssadd(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_ssadd_vec,
          .fno = gen_helper_gvec_ssadd8,
          .opc = INDEX_op_ssadd_vec,
          .vece = MO_8 },
        { .fniv = tcg_gen_ssadd_vec,
          .fno = gen_helper_gvec_ssadd16,
          .opc = INDEX_op_ssadd_vec,
          .vece = MO_16 },
        { .fniv = tcg_gen_ssadd_vec,
          .fno = gen_helper_gvec_ssadd32,
          .opc = INDEX_op_ssadd_vec,
          .vece = MO_32 },
        { .fniv = tcg_gen_ssadd_vec,
          .fno = gen_helper_gvec_ssadd64,
          .opc = INDEX_op_ssadd_vec,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_sssub(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_sssub_vec,
          .fno = gen_helper_gvec_sssub8,
          .opc = INDEX_op_sssub_vec,
          .vece = MO_8 },
        { .fniv = tcg_gen_sssub_vec,
          .fno = gen_helper_gvec_sssub16,
          .opc = INDEX_op_sssub_vec,
          .vece = MO_16 },
        { .fniv = tcg_gen_sssub_vec,
          .fno = gen_helper_gvec_sssub32,
          .opc = INDEX_op_sssub_vec,
          .vece = MO_32 },
        { .fniv = tcg_gen_sssub_vec,
          .fno = gen_helper_gvec_sssub64,
          .opc = INDEX_op_sssub_vec,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

static void tcg_gen_usadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 max = tcg_const_i32(-1);
    tcg_gen_add_i32(d, a, b);
    tcg_gen_movcond_i32(TCG_COND_LTU, d, d, a, max, d);
    tcg_temp_free_i32(max);
}

static void tcg_gen_usadd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 max = tcg_const_i64(-1);
    tcg_gen_add_i64(d, a, b);
    tcg_gen_movcond_i64(TCG_COND_LTU, d, d, a, max, d);
    tcg_temp_free_i64(max);
}

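/*
 * Illustrative note (not in the original source): unsigned overflow of
 * a + b is detected by the wrapped sum being smaller than either
 * operand, so "if (d < a) d = all-ones" implements the saturation.
 * E.g. 0xfffffffe + 0x03 wraps to 0x01 < 0xfffffffe, and d becomes
 * 0xffffffff.
 */
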
void tcg_gen_gvec_usadd(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_usadd_vec,
          .fno = gen_helper_gvec_usadd8,
          .opc = INDEX_op_usadd_vec,
          .vece = MO_8 },
        { .fniv = tcg_gen_usadd_vec,
          .fno = gen_helper_gvec_usadd16,
          .opc = INDEX_op_usadd_vec,
          .vece = MO_16 },
        { .fni4 = tcg_gen_usadd_i32,
          .fniv = tcg_gen_usadd_vec,
          .fno = gen_helper_gvec_usadd32,
          .opc = INDEX_op_usadd_vec,
          .vece = MO_32 },
        { .fni8 = tcg_gen_usadd_i64,
          .fniv = tcg_gen_usadd_vec,
          .fno = gen_helper_gvec_usadd64,
          .opc = INDEX_op_usadd_vec,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

static void tcg_gen_ussub_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 min = tcg_const_i32(0);
    tcg_gen_sub_i32(d, a, b);
    tcg_gen_movcond_i32(TCG_COND_LTU, d, a, b, min, d);
    tcg_temp_free_i32(min);
}

static void tcg_gen_ussub_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 min = tcg_const_i64(0);
    tcg_gen_sub_i64(d, a, b);
    tcg_gen_movcond_i64(TCG_COND_LTU, d, a, b, min, d);
    tcg_temp_free_i64(min);
}

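/*
 * Illustrative note (not in the original source): for unsigned
 * saturating subtraction the condition is simply a < b, in which case
 * the true result would be negative and is clamped to 0.
 */
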
void tcg_gen_gvec_ussub(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_ussub_vec,
          .fno = gen_helper_gvec_ussub8,
          .opc = INDEX_op_ussub_vec,
          .vece = MO_8 },
        { .fniv = tcg_gen_ussub_vec,
          .fno = gen_helper_gvec_ussub16,
          .opc = INDEX_op_ussub_vec,
          .vece = MO_16 },
        { .fni4 = tcg_gen_ussub_i32,
          .fniv = tcg_gen_ussub_vec,
          .fno = gen_helper_gvec_ussub32,
          .opc = INDEX_op_ussub_vec,
          .vece = MO_32 },
        { .fni8 = tcg_gen_ussub_i64,
          .fniv = tcg_gen_ussub_vec,
          .fno = gen_helper_gvec_ussub64,
          .opc = INDEX_op_ussub_vec,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_smin(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_smin_vec,
          .fno = gen_helper_gvec_smin8,
          .opc = INDEX_op_smin_vec,
          .vece = MO_8 },
        { .fniv = tcg_gen_smin_vec,
          .fno = gen_helper_gvec_smin16,
          .opc = INDEX_op_smin_vec,
          .vece = MO_16 },
        { .fni4 = tcg_gen_smin_i32,
          .fniv = tcg_gen_smin_vec,
          .fno = gen_helper_gvec_smin32,
          .opc = INDEX_op_smin_vec,
          .vece = MO_32 },
        { .fni8 = tcg_gen_smin_i64,
          .fniv = tcg_gen_smin_vec,
          .fno = gen_helper_gvec_smin64,
          .opc = INDEX_op_smin_vec,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_umin(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_umin_vec,
          .fno = gen_helper_gvec_umin8,
          .opc = INDEX_op_umin_vec,
          .vece = MO_8 },
        { .fniv = tcg_gen_umin_vec,
          .fno = gen_helper_gvec_umin16,
          .opc = INDEX_op_umin_vec,
          .vece = MO_16 },
        { .fni4 = tcg_gen_umin_i32,
          .fniv = tcg_gen_umin_vec,
          .fno = gen_helper_gvec_umin32,
          .opc = INDEX_op_umin_vec,
          .vece = MO_32 },
        { .fni8 = tcg_gen_umin_i64,
          .fniv = tcg_gen_umin_vec,
          .fno = gen_helper_gvec_umin64,
          .opc = INDEX_op_umin_vec,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_smax(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_smax_vec,
          .fno = gen_helper_gvec_smax8,
          .opc = INDEX_op_smax_vec,
          .vece = MO_8 },
        { .fniv = tcg_gen_smax_vec,
          .fno = gen_helper_gvec_smax16,
          .opc = INDEX_op_smax_vec,
          .vece = MO_16 },
        { .fni4 = tcg_gen_smax_i32,
          .fniv = tcg_gen_smax_vec,
          .fno = gen_helper_gvec_smax32,
          .opc = INDEX_op_smax_vec,
          .vece = MO_32 },
        { .fni8 = tcg_gen_smax_i64,
          .fniv = tcg_gen_smax_vec,
          .fno = gen_helper_gvec_smax64,
          .opc = INDEX_op_smax_vec,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_umax(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_umax_vec,
          .fno = gen_helper_gvec_umax8,
          .opc = INDEX_op_umax_vec,
          .vece = MO_8 },
        { .fniv = tcg_gen_umax_vec,
          .fno = gen_helper_gvec_umax16,
          .opc = INDEX_op_umax_vec,
          .vece = MO_16 },
        { .fni4 = tcg_gen_umax_i32,
          .fniv = tcg_gen_umax_vec,
          .fno = gen_helper_gvec_umax32,
          .opc = INDEX_op_umax_vec,
          .vece = MO_32 },
        { .fni8 = tcg_gen_umax_i64,
          .fniv = tcg_gen_umax_vec,
          .fno = gen_helper_gvec_umax64,
          .opc = INDEX_op_umax_vec,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

/* Perform a vector negation using normal negation and a mask.
   Compare gen_subv_mask above.  */
static void gen_negv_mask(TCGv_i64 d, TCGv_i64 b, TCGv_i64 m)
{
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_andc_i64(t3, m, b);
    tcg_gen_andc_i64(t2, b, m);
    tcg_gen_sub_i64(d, m, t2);
    tcg_gen_xor_i64(d, d, t3);

    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}

void tcg_gen_vec_neg8_i64(TCGv_i64 d, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80));
    gen_negv_mask(d, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_neg16_i64(TCGv_i64 d, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000));
    gen_negv_mask(d, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_neg32_i64(TCGv_i64 d, TCGv_i64 b)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_andi_i64(t1, b, ~0xffffffffull);
    tcg_gen_neg_i64(t2, b);
    tcg_gen_neg_i64(t1, t1);
    tcg_gen_deposit_i64(d, t1, t2, 0, 32);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

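/*
 * Illustrative note (not in the original source): as with add32/sub32,
 * the two 32-bit lanes avoid the mask form.  t2 = -b has a correct low
 * lane; t1 = -(b & ~0xffffffff) negates only the high lane of b, so its
 * upper 32 bits equal the correct negated high lane without any borrow
 * from below, and the deposit stitches the two lanes together.
 */
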
void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2 g[4] = {
        { .fni8 = tcg_gen_vec_neg8_i64,
          .fniv = tcg_gen_neg_vec,
          .fno = gen_helper_gvec_neg8,
          .opc = INDEX_op_neg_vec,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_neg16_i64,
          .fniv = tcg_gen_neg_vec,
          .fno = gen_helper_gvec_neg16,
          .opc = INDEX_op_neg_vec,
          .vece = MO_16 },
        { .fni4 = tcg_gen_neg_i32,
          .fniv = tcg_gen_neg_vec,
          .fno = gen_helper_gvec_neg32,
          .opc = INDEX_op_neg_vec,
          .vece = MO_32 },
        { .fni8 = tcg_gen_neg_i64,
          .fniv = tcg_gen_neg_vec,
          .fno = gen_helper_gvec_neg64,
          .opc = INDEX_op_neg_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g[vece]);
}

2135 void tcg_gen_gvec_and(unsigned vece, uint32_t dofs, uint32_t aofs,
2136 uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
2138 static const GVecGen3 g = {
2139 .fni8 = tcg_gen_and_i64,
2140 .fniv = tcg_gen_and_vec,
2141 .fno = gen_helper_gvec_and,
2142 .opc = INDEX_op_and_vec,
2143 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
2147 tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
2149 tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
2153 void tcg_gen_gvec_or(unsigned vece, uint32_t dofs, uint32_t aofs,
2154 uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
2156 static const GVecGen3 g = {
2157 .fni8 = tcg_gen_or_i64,
2158 .fniv = tcg_gen_or_vec,
2159 .fno = gen_helper_gvec_or,
2160 .opc = INDEX_op_or_vec,
2161 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
2165 tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
2167 tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
2171 void tcg_gen_gvec_xor(unsigned vece, uint32_t dofs, uint32_t aofs,
2172 uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
2174 static const GVecGen3 g = {
2175 .fni8 = tcg_gen_xor_i64,
2176 .fniv = tcg_gen_xor_vec,
2177 .fno = gen_helper_gvec_xor,
2178 .opc = INDEX_op_xor_vec,
2179 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
2183 tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, 0);
2185 tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
2189 void tcg_gen_gvec_andc(unsigned vece, uint32_t dofs, uint32_t aofs,
2190 uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
2192 static const GVecGen3 g = {
2193 .fni8 = tcg_gen_andc_i64,
2194 .fniv = tcg_gen_andc_vec,
2195 .fno = gen_helper_gvec_andc,
2196 .opc = INDEX_op_andc_vec,
2197 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
2201 tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, 0);
2203 tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
2207 void tcg_gen_gvec_orc(unsigned vece, uint32_t dofs, uint32_t aofs,
2208 uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
2210 static const GVecGen3 g = {
2211 .fni8 = tcg_gen_orc_i64,
2212 .fniv = tcg_gen_orc_vec,
2213 .fno = gen_helper_gvec_orc,
2214 .opc = INDEX_op_orc_vec,
2215 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
2219 tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, -1);
2221 tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
2225 void tcg_gen_gvec_nand(unsigned vece, uint32_t dofs, uint32_t aofs,
2226 uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
2228 static const GVecGen3 g = {
2229 .fni8 = tcg_gen_nand_i64,
2230 .fniv = tcg_gen_nand_vec,
2231 .fno = gen_helper_gvec_nand,
2232 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
2236 tcg_gen_gvec_not(vece, dofs, aofs, oprsz, maxsz);
2238 tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
2242 void tcg_gen_gvec_nor(unsigned vece, uint32_t dofs, uint32_t aofs,
2243 uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
2245 static const GVecGen3 g = {
2246 .fni8 = tcg_gen_nor_i64,
2247 .fniv = tcg_gen_nor_vec,
2248 .fno = gen_helper_gvec_nor,
2249 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
2253 tcg_gen_gvec_not(vece, dofs, aofs, oprsz, maxsz);
2255 tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);

void tcg_gen_gvec_eqv(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_eqv_i64,
        .fniv = tcg_gen_eqv_vec,
        .fno = gen_helper_gvec_eqv,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, -1);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}
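
/* The *s (scalar) expanders below take a TCGv_i64 operand and the *i
 * (immediate) expanders a constant; both first replicate the operand
 * across a 64-bit value (gen_dup_i64 for the former, dup_const for the
 * latter) and then share one GVecGen2s description.  For example,
 * dup_const(MO_16, 0x00ff) is 0x00ff00ff00ff00ff, so
 * tcg_gen_gvec_andi(MO_16, ...) with c == 0x00ff clears the high byte
 * of every 16-bit element.
 */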

static const GVecGen2s gop_ands = {
    .fni8 = tcg_gen_and_i64,
    .fniv = tcg_gen_and_vec,
    .fno = gen_helper_gvec_ands,
    .opc = INDEX_op_and_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .vece = MO_64
};

void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    gen_dup_i64(vece, tmp, c);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
    tcg_temp_free_i64(tmp);
}

void tcg_gen_gvec_andi(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c));
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
    tcg_temp_free_i64(tmp);
}

static const GVecGen2s gop_xors = {
    .fni8 = tcg_gen_xor_i64,
    .fniv = tcg_gen_xor_vec,
    .fno = gen_helper_gvec_xors,
    .opc = INDEX_op_xor_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .vece = MO_64
};

void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    gen_dup_i64(vece, tmp, c);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
    tcg_temp_free_i64(tmp);
}

void tcg_gen_gvec_xori(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c));
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
    tcg_temp_free_i64(tmp);
}

static const GVecGen2s gop_ors = {
    .fni8 = tcg_gen_or_i64,
    .fniv = tcg_gen_or_vec,
    .fno = gen_helper_gvec_ors,
    .opc = INDEX_op_or_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .vece = MO_64
};

void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
                      TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    gen_dup_i64(vece, tmp, c);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
    tcg_temp_free_i64(tmp);
}

void tcg_gen_gvec_ori(unsigned vece, uint32_t dofs, uint32_t aofs,
                      int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c));
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
    tcg_temp_free_i64(tmp);
}
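
/* Immediate shifts of elements narrower than 64 bits are implemented
 * on the i64 path by shifting the whole 64-bit word and then masking
 * off the bits that crossed an element boundary.  For a left shift of
 * 8-bit elements by c, dup_const(MO_8, 0xff << c) clears, within each
 * byte, the low c bits shifted in from the byte below; right shifts
 * use 0xff >> c to clear the high c bits instead.
 */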

void tcg_gen_vec_shl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_8, 0xff << c);
    tcg_gen_shli_i64(d, a, c);
    tcg_gen_andi_i64(d, d, mask);
}

void tcg_gen_vec_shl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_16, 0xffff << c);
    tcg_gen_shli_i64(d, a, c);
    tcg_gen_andi_i64(d, d, mask);
}

void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2i g[4] = {
        { .fni8 = tcg_gen_vec_shl8i_i64,
          .fniv = tcg_gen_shli_vec,
          .fno = gen_helper_gvec_shl8i,
          .opc = INDEX_op_shli_vec,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_shl16i_i64,
          .fniv = tcg_gen_shli_vec,
          .fno = gen_helper_gvec_shl16i,
          .opc = INDEX_op_shli_vec,
          .vece = MO_16 },
        { .fni4 = tcg_gen_shli_i32,
          .fniv = tcg_gen_shli_vec,
          .fno = gen_helper_gvec_shl32i,
          .opc = INDEX_op_shli_vec,
          .vece = MO_32 },
        { .fni8 = tcg_gen_shli_i64,
          .fniv = tcg_gen_shli_vec,
          .fno = gen_helper_gvec_shl64i,
          .opc = INDEX_op_shli_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    if (shift == 0) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
    }
}
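
/* As with shli above, the shri and sari expanders below assert that
 * the shift count lies in [0, element bits) and expand a zero shift
 * as a simple move.
 */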

void tcg_gen_vec_shr8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_8, 0xff >> c);
    tcg_gen_shri_i64(d, a, c);
    tcg_gen_andi_i64(d, d, mask);
}

void tcg_gen_vec_shr16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> c);
    tcg_gen_shri_i64(d, a, c);
    tcg_gen_andi_i64(d, d, mask);
}

void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2i g[4] = {
        { .fni8 = tcg_gen_vec_shr8i_i64,
          .fniv = tcg_gen_shri_vec,
          .fno = gen_helper_gvec_shr8i,
          .opc = INDEX_op_shri_vec,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_shr16i_i64,
          .fniv = tcg_gen_shri_vec,
          .fno = gen_helper_gvec_shr16i,
          .opc = INDEX_op_shri_vec,
          .vece = MO_16 },
        { .fni4 = tcg_gen_shri_i32,
          .fniv = tcg_gen_shri_vec,
          .fno = gen_helper_gvec_shr32i,
          .opc = INDEX_op_shri_vec,
          .vece = MO_32 },
        { .fni8 = tcg_gen_shri_i64,
          .fniv = tcg_gen_shri_vec,
          .fno = gen_helper_gvec_shr64i,
          .opc = INDEX_op_shri_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    if (shift == 0) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
    }
}
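
/* There is no single i64 masking trick that yields an arithmetic right
 * shift of sub-word elements, so the expanders below shift logically
 * and rebuild the sign extension: isolate each element's shifted-down
 * sign bit and multiply by (2 << c) - 2, which smears it across the c
 * bits above it.  Worked example for 8-bit elements with c = 3 and
 * input 0x80: the logical shift gives 0x10; the isolated sign bit 0x10
 * times 14 gives 0xe0; and 0x10 | 0xe0 = 0xf0, which is 0x80 shifted
 * right by 3 with sign extension.
 */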

void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t s_mask = dup_const(MO_8, 0x80 >> c);
    uint64_t c_mask = dup_const(MO_8, 0xff >> c);
    TCGv_i64 s = tcg_temp_new_i64();

    tcg_gen_shri_i64(d, a, c);
    tcg_gen_andi_i64(s, d, s_mask);  /* isolate (shifted) sign bit */
    tcg_gen_muli_i64(s, s, (2 << c) - 2); /* replicate isolated signs */
    tcg_gen_andi_i64(d, d, c_mask);  /* clear out bits above sign */
    tcg_gen_or_i64(d, d, s);         /* include sign extension */
    tcg_temp_free_i64(s);
}

void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t s_mask = dup_const(MO_16, 0x8000 >> c);
    uint64_t c_mask = dup_const(MO_16, 0xffff >> c);
    TCGv_i64 s = tcg_temp_new_i64();

    tcg_gen_shri_i64(d, a, c);
    tcg_gen_andi_i64(s, d, s_mask);  /* isolate (shifted) sign bit */
    tcg_gen_andi_i64(d, d, c_mask);  /* clear out bits above sign */
    tcg_gen_muli_i64(s, s, (2 << c) - 2); /* replicate isolated signs */
    tcg_gen_or_i64(d, d, s);         /* include sign extension */
    tcg_temp_free_i64(s);
}

void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2i g[4] = {
        { .fni8 = tcg_gen_vec_sar8i_i64,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar8i,
          .opc = INDEX_op_sari_vec,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_sar16i_i64,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar16i,
          .opc = INDEX_op_sari_vec,
          .vece = MO_16 },
        { .fni4 = tcg_gen_sari_i32,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar32i,
          .opc = INDEX_op_sari_vec,
          .vece = MO_32 },
        { .fni8 = tcg_gen_sari_i64,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar64i,
          .opc = INDEX_op_sari_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    if (shift == 0) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
    }
}
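
/* Comparisons.  All of the expansions below produce, per element, -1
 * when the comparison is true and 0 when it is false, matching the
 * semantics of tcg_gen_cmp_vec; the integer paths get there with
 * setcond (0 or 1) followed by a negation.
 */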

/* Expand OPRSZ bytes worth of three-operand comparisons using i32 elements. */
static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                           uint32_t oprsz, TCGCond cond)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        tcg_gen_ld_i32(t1, cpu_env, bofs + i);
        tcg_gen_setcond_i32(cond, t0, t0, t1);
        tcg_gen_neg_i32(t0, t0);
        tcg_gen_st_i32(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
}

static void expand_cmp_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                           uint32_t oprsz, TCGCond cond)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        tcg_gen_ld_i64(t1, cpu_env, bofs + i);
        tcg_gen_setcond_i64(cond, t0, t0, t1);
        tcg_gen_neg_i64(t0, t0);
        tcg_gen_st_i64(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t0);
}

static void expand_cmp_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t tysz,
                           TCGType type, TCGCond cond)
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        tcg_gen_ld_vec(t1, cpu_env, bofs + i);
        tcg_gen_cmp_vec(cond, vece, t0, t0, t1);
        tcg_gen_st_vec(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t0);
}
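
/* On the vector path, the backend's cmp_vec already produces the full
 * 0/-1 element mask, so no separate negation step is required.
 */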

void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
                      uint32_t aofs, uint32_t bofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static gen_helper_gvec_3 * const eq_fn[4] = {
        gen_helper_gvec_eq8, gen_helper_gvec_eq16,
        gen_helper_gvec_eq32, gen_helper_gvec_eq64
    };
    static gen_helper_gvec_3 * const ne_fn[4] = {
        gen_helper_gvec_ne8, gen_helper_gvec_ne16,
        gen_helper_gvec_ne32, gen_helper_gvec_ne64
    };
    static gen_helper_gvec_3 * const lt_fn[4] = {
        gen_helper_gvec_lt8, gen_helper_gvec_lt16,
        gen_helper_gvec_lt32, gen_helper_gvec_lt64
    };
    static gen_helper_gvec_3 * const le_fn[4] = {
        gen_helper_gvec_le8, gen_helper_gvec_le16,
        gen_helper_gvec_le32, gen_helper_gvec_le64
    };
    static gen_helper_gvec_3 * const ltu_fn[4] = {
        gen_helper_gvec_ltu8, gen_helper_gvec_ltu16,
        gen_helper_gvec_ltu32, gen_helper_gvec_ltu64
    };
    static gen_helper_gvec_3 * const leu_fn[4] = {
        gen_helper_gvec_leu8, gen_helper_gvec_leu16,
        gen_helper_gvec_leu32, gen_helper_gvec_leu64
    };
    static gen_helper_gvec_3 * const * const fns[16] = {
        [TCG_COND_EQ] = eq_fn,
        [TCG_COND_NE] = ne_fn,
        [TCG_COND_LT] = lt_fn,
        [TCG_COND_LE] = le_fn,
        [TCG_COND_LTU] = ltu_fn,
        [TCG_COND_LEU] = leu_fn,
    };
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs);
    check_overlap_3(dofs, aofs, bofs, maxsz);

    if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
        do_dup(MO_8, dofs, oprsz, maxsz,
               NULL, NULL, -(cond == TCG_COND_ALWAYS));
        return;
    }

    /* Implement inline with a vector type, if possible.
     * Prefer integer when 64-bit host and 64-bit comparison.
     */
    type = choose_vector_type(INDEX_op_cmp_vec, vece, oprsz,
                              TCG_TARGET_REG_BITS == 64 && vece == MO_64);
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_cmp_vec(vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256, cond);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128, cond);
        break;
    case TCG_TYPE_V64:
        expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64, cond);
        break;

    case 0:
        if (vece == MO_64 && check_size_impl(oprsz, 8)) {
            expand_cmp_i64(dofs, aofs, bofs, oprsz, cond);
        } else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
            expand_cmp_i32(dofs, aofs, bofs, oprsz, cond);
        } else {
            gen_helper_gvec_3 * const *fn = fns[cond];

            if (fn == NULL) {
                uint32_t tmp;
                tmp = aofs, aofs = bofs, bofs = tmp;
                cond = tcg_swap_cond(cond);
                fn = fns[cond];
                assert(fn != NULL);
            }
            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, 0, fn[vece]);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
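
/* Usage sketch (hypothetical CPU state and offsets): a target front
 * end comparing two 16-byte vectors of 32-bit elements might write
 *
 *     tcg_gen_gvec_cmp(TCG_COND_GT, MO_32,
 *                      offsetof(CPUFooState, vd),
 *                      offsetof(CPUFooState, va),
 *                      offsetof(CPUFooState, vb),
 *                      16, 16);
 *
 * which leaves -1 in each element of vd where va > vb, and 0
 * elsewhere.  CPUFooState and its fields are illustrative only.
 */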