2 * QEMU TCG support -- s390x vector instruction translation functions
4 * Copyright (C) 2019 Red Hat Inc
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
14 * For most instructions that use the same element size for reads and
16 * writes, we can use real gvec vector expansion, which potentially uses
16 * real host vector instructions. As they only work up to 64 bit elements,
17 * 128 bit elements (vector is a single element) have to be handled
18 * differently. Operations that are too complicated to encode via TCG ops
19 * are handled via gvec ool (out-of-line) handlers.
21 * As soon as instructions use different element sizes for reads and writes
22 * or access elements "out of their element scope" we expand them manually
23 * in fancy loops, as gvec expansion does not deal with actual element
24 * numbers and also does not support access to other elements.
27 * As we only have i32/i64, such elements have to be loaded into two
28 * i64 values and can then be processed e.g. by tcg_gen_add2_i64.
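* For example, a 128 bit addition is carried out on the two doubleword
* pairs with a single tcg_gen_add2_i64(dl, dh, al, ah, bl, bh), carry
* included (see gen_gvec128_3_i64() below).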
31 * On s390x, the operand size (oprsz) and the maximum size (maxsz) are
32 * always 16 (128 bit). What gvec code calls "vece", s390x calls "es",
33 * a.k.a. "element size". These values nicely map to MO_8 ... MO_64. Only
34 * 128 bit element size has to be treated in a special way (MO_64 + 1).
35 * We will use ES_* instead of MO_* for this reason in this file.
38 * As gvec ool-helpers currently cannot return values (besides via
39 * pointers like vectors or cpu_env), whenever we have to set the CC and
40 * can't conclude the value from the result vector, we will directly
41 * set it in "env->cc_op" and mark it as static via set_cc_static().
42 * Whenever this is done, the helper writes globals (cc_op).
45 #define NUM_VEC_ELEMENT_BYTES(es) (1 << (es))
46 #define NUM_VEC_ELEMENTS(es) (16 / NUM_VEC_ELEMENT_BYTES(es))
47 #define NUM_VEC_ELEMENT_BITS(es) (NUM_VEC_ELEMENT_BYTES(es) * BITS_PER_BYTE)
55 /* Floating-Point Format */
60 static inline bool valid_vec_element(uint8_t enr, MemOp es)
62 return !(enr & ~(NUM_VEC_ELEMENTS(es) - 1));
65 static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr,
68 const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);
72 tcg_gen_ld8u_i64(dst, cpu_env, offs);
75 tcg_gen_ld16u_i64(dst, cpu_env, offs);
78 tcg_gen_ld32u_i64(dst, cpu_env, offs);
81 tcg_gen_ld8s_i64(dst, cpu_env, offs);
84 tcg_gen_ld16s_i64(dst, cpu_env, offs);
87 tcg_gen_ld32s_i64(dst, cpu_env, offs);
91 tcg_gen_ld_i64(dst, cpu_env, offs);
94 g_assert_not_reached();
98 static void read_vec_element_i32(TCGv_i32 dst, uint8_t reg, uint8_t enr,
101 const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);
105 tcg_gen_ld8u_i32(dst, cpu_env, offs);
108 tcg_gen_ld16u_i32(dst, cpu_env, offs);
111 tcg_gen_ld8s_i32(dst, cpu_env, offs);
113 case ES_16 | MO_SIGN:
114 tcg_gen_ld16s_i32(dst, cpu_env, offs);
117 case ES_32 | MO_SIGN:
118 tcg_gen_ld_i32(dst, cpu_env, offs);
121 g_assert_not_reached();
125 static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr,
128 const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);
132 tcg_gen_st8_i64(src, cpu_env, offs);
135 tcg_gen_st16_i64(src, cpu_env, offs);
138 tcg_gen_st32_i64(src, cpu_env, offs);
141 tcg_gen_st_i64(src, cpu_env, offs);
144 g_assert_not_reached();
148 static void write_vec_element_i32(TCGv_i32 src, int reg, uint8_t enr,
151 const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);
155 tcg_gen_st8_i32(src, cpu_env, offs);
158 tcg_gen_st16_i32(src, cpu_env, offs);
161 tcg_gen_st_i32(src, cpu_env, offs);
164 g_assert_not_reached();
168 static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr,
171 TCGv_i64 tmp = tcg_temp_new_i64();
173 /* mask off invalid parts from the element nr */
174 tcg_gen_andi_i64(tmp, enr, NUM_VEC_ELEMENTS(es) - 1);
176 /* convert it to an element offset relative to cpu_env (vec_reg_offset()) */
177 tcg_gen_shli_i64(tmp, tmp, es);
178 #ifndef HOST_WORDS_BIGENDIAN
179 tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es));
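/*
 * Host memory holds each doubleword as a host-endian uint64_t, so on
 * little-endian hosts the byte offset of an element within a doubleword
 * has to be mirrored; XOR with (8 - element_bytes) does that for all
 * power-of-two element sizes.
 */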
181 tcg_gen_addi_i64(tmp, tmp, vec_full_reg_offset(reg));
183 /* generate the final ptr by adding cpu_env */
184 tcg_gen_trunc_i64_ptr(ptr, tmp);
185 tcg_gen_add_ptr(ptr, ptr, cpu_env);
187 tcg_temp_free_i64(tmp);
190 #define gen_gvec_2(v1, v2, gen) \
191 tcg_gen_gvec_2(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
193 #define gen_gvec_2s(v1, v2, c, gen) \
194 tcg_gen_gvec_2s(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
196 #define gen_gvec_2_ool(v1, v2, data, fn) \
197 tcg_gen_gvec_2_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
199 #define gen_gvec_2i_ool(v1, v2, c, data, fn) \
200 tcg_gen_gvec_2i_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
202 #define gen_gvec_2_ptr(v1, v2, ptr, data, fn) \
203 tcg_gen_gvec_2_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
204 ptr, 16, 16, data, fn)
205 #define gen_gvec_3(v1, v2, v3, gen) \
206 tcg_gen_gvec_3(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
207 vec_full_reg_offset(v3), 16, 16, gen)
208 #define gen_gvec_3_ool(v1, v2, v3, data, fn) \
209 tcg_gen_gvec_3_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
210 vec_full_reg_offset(v3), 16, 16, data, fn)
211 #define gen_gvec_3_ptr(v1, v2, v3, ptr, data, fn) \
212 tcg_gen_gvec_3_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
213 vec_full_reg_offset(v3), ptr, 16, 16, data, fn)
214 #define gen_gvec_3i(v1, v2, v3, c, gen) \
215 tcg_gen_gvec_3i(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
216 vec_full_reg_offset(v3), 16, 16, c, gen)
217 #define gen_gvec_4(v1, v2, v3, v4, gen) \
218 tcg_gen_gvec_4(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
219 vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
221 #define gen_gvec_4_ool(v1, v2, v3, v4, data, fn) \
222 tcg_gen_gvec_4_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
223 vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
225 #define gen_gvec_4_ptr(v1, v2, v3, v4, ptr, data, fn) \
226 tcg_gen_gvec_4_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
227 vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
228 ptr, 16, 16, data, fn)
229 #define gen_gvec_dup_i64(es, v1, c) \
230 tcg_gen_gvec_dup_i64(es, vec_full_reg_offset(v1), 16, 16, c)
231 #define gen_gvec_mov(v1, v2) \
232 tcg_gen_gvec_mov(0, vec_full_reg_offset(v1), vec_full_reg_offset(v2), 16, \
234 #define gen_gvec_dup_imm(es, v1, c) \
235 tcg_gen_gvec_dup_imm(es, vec_full_reg_offset(v1), 16, 16, c)
236 #define gen_gvec_fn_2(fn, es, v1, v2) \
237 tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
239 #define gen_gvec_fn_2i(fn, es, v1, v2, c) \
240 tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
242 #define gen_gvec_fn_2s(fn, es, v1, v2, s) \
243 tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
245 #define gen_gvec_fn_3(fn, es, v1, v2, v3) \
246 tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
247 vec_full_reg_offset(v3), 16, 16)
248 #define gen_gvec_fn_4(fn, es, v1, v2, v3, v4) \
249 tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
250 vec_full_reg_offset(v3), vec_full_reg_offset(v4), 16, 16)
253 * Helper to carry out a 128 bit vector computation using 2 i64 values per vector element.
256 typedef void (*gen_gvec128_3_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
257 TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
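/*
 * Element 0 is the leftmost, i.e. most significant, doubleword of the
 * 128 bit vector, so it provides the high part of the computation.
 */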
258 static void gen_gvec128_3_i64(gen_gvec128_3_i64_fn fn, uint8_t d, uint8_t a,
261 TCGv_i64 dh = tcg_temp_new_i64();
262 TCGv_i64 dl = tcg_temp_new_i64();
263 TCGv_i64 ah = tcg_temp_new_i64();
264 TCGv_i64 al = tcg_temp_new_i64();
265 TCGv_i64 bh = tcg_temp_new_i64();
266 TCGv_i64 bl = tcg_temp_new_i64();
268 read_vec_element_i64(ah, a, 0, ES_64);
269 read_vec_element_i64(al, a, 1, ES_64);
270 read_vec_element_i64(bh, b, 0, ES_64);
271 read_vec_element_i64(bl, b, 1, ES_64);
272 fn(dl, dh, al, ah, bl, bh);
273 write_vec_element_i64(dh, d, 0, ES_64);
274 write_vec_element_i64(dl, d, 1, ES_64);
276 tcg_temp_free_i64(dh);
277 tcg_temp_free_i64(dl);
278 tcg_temp_free_i64(ah);
279 tcg_temp_free_i64(al);
280 tcg_temp_free_i64(bh);
281 tcg_temp_free_i64(bl);
284 typedef void (*gen_gvec128_4_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
285 TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh,
286 TCGv_i64 cl, TCGv_i64 ch);
287 static void gen_gvec128_4_i64(gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a,
288 uint8_t b, uint8_t c)
290 TCGv_i64 dh = tcg_temp_new_i64();
291 TCGv_i64 dl = tcg_temp_new_i64();
292 TCGv_i64 ah = tcg_temp_new_i64();
293 TCGv_i64 al = tcg_temp_new_i64();
294 TCGv_i64 bh = tcg_temp_new_i64();
295 TCGv_i64 bl = tcg_temp_new_i64();
296 TCGv_i64 ch = tcg_temp_new_i64();
297 TCGv_i64 cl = tcg_temp_new_i64();
299 read_vec_element_i64(ah, a, 0, ES_64);
300 read_vec_element_i64(al, a, 1, ES_64);
301 read_vec_element_i64(bh, b, 0, ES_64);
302 read_vec_element_i64(bl, b, 1, ES_64);
303 read_vec_element_i64(ch, c, 0, ES_64);
304 read_vec_element_i64(cl, c, 1, ES_64);
305 fn(dl, dh, al, ah, bl, bh, cl, ch);
306 write_vec_element_i64(dh, d, 0, ES_64);
307 write_vec_element_i64(dl, d, 1, ES_64);
309 tcg_temp_free_i64(dh);
310 tcg_temp_free_i64(dl);
311 tcg_temp_free_i64(ah);
312 tcg_temp_free_i64(al);
313 tcg_temp_free_i64(bh);
314 tcg_temp_free_i64(bl);
315 tcg_temp_free_i64(ch);
316 tcg_temp_free_i64(cl);
319 static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
322 TCGv_i64 bl = tcg_const_i64(b);
323 TCGv_i64 bh = tcg_const_i64(0);
325 tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
326 tcg_temp_free_i64(bl);
327 tcg_temp_free_i64(bh);
330 static DisasJumpType op_vbperm(DisasContext *s, DisasOps *o)
332 gen_gvec_3_ool(get_field(s, v1), get_field(s, v2), get_field(s, v3), 0,
333 gen_helper_gvec_vbperm);
338 static DisasJumpType op_vge(DisasContext *s, DisasOps *o)
340 const uint8_t es = s->insn->data;
341 const uint8_t enr = get_field(s, m3);
344 if (!valid_vec_element(enr, es)) {
345 gen_program_exception(s, PGM_SPECIFICATION);
346 return DISAS_NORETURN;
349 tmp = tcg_temp_new_i64();
350 read_vec_element_i64(tmp, get_field(s, v2), enr, es);
351 tcg_gen_add_i64(o->addr1, o->addr1, tmp);
352 gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0);
354 tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
355 write_vec_element_i64(tmp, get_field(s, v1), enr, es);
356 tcg_temp_free_i64(tmp);
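/*
 * Expand each of the 8 mask bits into a byte of ones; bit i selects byte i,
 * counted from the right. E.g. generate_byte_mask(0xa5) returns
 * 0xff00ff0000ff00ff.
 */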
360 static uint64_t generate_byte_mask(uint8_t mask)
365 for (i = 0; i < 8; i++) {
366 if ((mask >> i) & 1) {
367 r |= 0xffull << (i * 8);
373 static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o)
375 const uint16_t i2 = get_field(s, i2);
377 if (i2 == (i2 & 0xff) * 0x0101) {
379 * Masks for both 64 bit elements of the vector are the same.
380 * Trust tcg to produce a good constant loading.
382 gen_gvec_dup_imm(ES_64, get_field(s, v1),
383 generate_byte_mask(i2 & 0xff));
385 TCGv_i64 t = tcg_temp_new_i64();
387 tcg_gen_movi_i64(t, generate_byte_mask(i2 >> 8));
388 write_vec_element_i64(t, get_field(s, v1), 0, ES_64);
389 tcg_gen_movi_i64(t, generate_byte_mask(i2));
390 write_vec_element_i64(t, get_field(s, v1), 1, ES_64);
391 tcg_temp_free_i64(t);
396 static DisasJumpType op_vgm(DisasContext *s, DisasOps *o)
398 const uint8_t es = get_field(s, m4);
399 const uint8_t bits = NUM_VEC_ELEMENT_BITS(es);
400 const uint8_t i2 = get_field(s, i2) & (bits - 1);
401 const uint8_t i3 = get_field(s, i3) & (bits - 1);
406 gen_program_exception(s, PGM_SPECIFICATION);
407 return DISAS_NORETURN;
410 /* generate the mask - take care of wrapping */
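/* e.g. for ES_8, i2 == 6 and i3 == 1 yield the wrapped mask 0b11000011 */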
411 for (i = i2; ; i = (i + 1) % bits) {
412 mask |= 1ull << (bits - i - 1);
418 gen_gvec_dup_imm(es, get_field(s, v1), mask);
422 static DisasJumpType op_vl(DisasContext *s, DisasOps *o)
424 TCGv_i64 t0 = tcg_temp_new_i64();
425 TCGv_i64 t1 = tcg_temp_new_i64();
427 tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEQ);
428 gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
429 tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
430 write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
431 write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
437 static DisasJumpType op_vlr(DisasContext *s, DisasOps *o)
439 gen_gvec_mov(get_field(s, v1), get_field(s, v2));
443 static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o)
445 const uint8_t es = get_field(s, m3);
449 gen_program_exception(s, PGM_SPECIFICATION);
450 return DISAS_NORETURN;
453 tmp = tcg_temp_new_i64();
454 tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
455 gen_gvec_dup_i64(es, get_field(s, v1), tmp);
456 tcg_temp_free_i64(tmp);
460 static DisasJumpType op_vle(DisasContext *s, DisasOps *o)
462 const uint8_t es = s->insn->data;
463 const uint8_t enr = get_field(s, m3);
466 if (!valid_vec_element(enr, es)) {
467 gen_program_exception(s, PGM_SPECIFICATION);
468 return DISAS_NORETURN;
471 tmp = tcg_temp_new_i64();
472 tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
473 write_vec_element_i64(tmp, get_field(s, v1), enr, es);
474 tcg_temp_free_i64(tmp);
478 static DisasJumpType op_vlei(DisasContext *s, DisasOps *o)
480 const uint8_t es = s->insn->data;
481 const uint8_t enr = get_field(s, m3);
484 if (!valid_vec_element(enr, es)) {
485 gen_program_exception(s, PGM_SPECIFICATION);
486 return DISAS_NORETURN;
489 tmp = tcg_const_i64((int16_t)get_field(s, i2));
490 write_vec_element_i64(tmp, get_field(s, v1), enr, es);
491 tcg_temp_free_i64(tmp);
495 static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o)
497 const uint8_t es = get_field(s, m4);
501 gen_program_exception(s, PGM_SPECIFICATION);
502 return DISAS_NORETURN;
505 /* fast path if we don't need the register content */
506 if (!get_field(s, b2)) {
507 uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1);
509 read_vec_element_i64(o->out, get_field(s, v3), enr, es);
513 ptr = tcg_temp_new_ptr();
514 get_vec_element_ptr_i64(ptr, get_field(s, v3), o->addr1, es);
517 tcg_gen_ld8u_i64(o->out, ptr, 0);
520 tcg_gen_ld16u_i64(o->out, ptr, 0);
523 tcg_gen_ld32u_i64(o->out, ptr, 0);
526 tcg_gen_ld_i64(o->out, ptr, 0);
529 g_assert_not_reached();
531 tcg_temp_free_ptr(ptr);
536 static DisasJumpType op_vllez(DisasContext *s, DisasOps *o)
538 uint8_t es = get_field(s, m3);
543 /* rightmost sub-element of leftmost doubleword */
556 /* leftmost sub-element of leftmost doubleword */
558 if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
565 gen_program_exception(s, PGM_SPECIFICATION);
566 return DISAS_NORETURN;
569 t = tcg_temp_new_i64();
570 tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es);
571 gen_gvec_dup_imm(es, get_field(s, v1), 0);
572 write_vec_element_i64(t, get_field(s, v1), enr, es);
573 tcg_temp_free_i64(t);
577 static DisasJumpType op_vlm(DisasContext *s, DisasOps *o)
579 const uint8_t v3 = get_field(s, v3);
580 uint8_t v1 = get_field(s, v1);
583 if (v3 < v1 || (v3 - v1 + 1) > 16) {
584 gen_program_exception(s, PGM_SPECIFICATION);
585 return DISAS_NORETURN;
589 * Check for possible access exceptions by trying to load the last
590 * element. The first element will be checked first anyway.
592 t0 = tcg_temp_new_i64();
593 t1 = tcg_temp_new_i64();
594 gen_addi_and_wrap_i64(s, t0, o->addr1, (v3 - v1) * 16 + 8);
595 tcg_gen_qemu_ld_i64(t0, t0, get_mem_index(s), MO_TEQ);
598 tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
599 write_vec_element_i64(t1, v1, 0, ES_64);
603 gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
604 tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
605 write_vec_element_i64(t1, v1, 1, ES_64);
606 gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
609 /* Store the last element, loaded first */
610 write_vec_element_i64(t0, v1, 1, ES_64);
612 tcg_temp_free_i64(t0);
613 tcg_temp_free_i64(t1);
617 static DisasJumpType op_vlbb(DisasContext *s, DisasOps *o)
619 const int64_t block_size = (1ull << (get_field(s, m3) + 6));
620 const int v1_offs = vec_full_reg_offset(get_field(s, v1));
624 if (get_field(s, m3) > 6) {
625 gen_program_exception(s, PGM_SPECIFICATION);
626 return DISAS_NORETURN;
629 bytes = tcg_temp_new_i64();
630 a0 = tcg_temp_new_ptr();
631 /* calculate the number of bytes until the next block boundary */
632 tcg_gen_ori_i64(bytes, o->addr1, -block_size);
633 tcg_gen_neg_i64(bytes, bytes);
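/*
 * The ori with -block_size sets all bits above the block offset, so the
 * negation yields block_size - (addr1 % block_size).
 */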
635 tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
636 gen_helper_vll(cpu_env, a0, o->addr1, bytes);
637 tcg_temp_free_i64(bytes);
638 tcg_temp_free_ptr(a0);
642 static DisasJumpType op_vlvg(DisasContext *s, DisasOps *o)
644 const uint8_t es = get_field(s, m4);
648 gen_program_exception(s, PGM_SPECIFICATION);
649 return DISAS_NORETURN;
652 /* fast path if we don't need the register content */
653 if (!get_field(s, b2)) {
654 uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1);
656 write_vec_element_i64(o->in2, get_field(s, v1), enr, es);
660 ptr = tcg_temp_new_ptr();
661 get_vec_element_ptr_i64(ptr, get_field(s, v1), o->addr1, es);
664 tcg_gen_st8_i64(o->in2, ptr, 0);
667 tcg_gen_st16_i64(o->in2, ptr, 0);
670 tcg_gen_st32_i64(o->in2, ptr, 0);
673 tcg_gen_st_i64(o->in2, ptr, 0);
676 g_assert_not_reached();
678 tcg_temp_free_ptr(ptr);
683 static DisasJumpType op_vlvgp(DisasContext *s, DisasOps *o)
685 write_vec_element_i64(o->in1, get_field(s, v1), 0, ES_64);
686 write_vec_element_i64(o->in2, get_field(s, v1), 1, ES_64);
690 static DisasJumpType op_vll(DisasContext *s, DisasOps *o)
692 const int v1_offs = vec_full_reg_offset(get_field(s, v1));
693 TCGv_ptr a0 = tcg_temp_new_ptr();
695 /* convert highest index into an actual length */
696 tcg_gen_addi_i64(o->in2, o->in2, 1);
697 tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
698 gen_helper_vll(cpu_env, a0, o->addr1, o->in2);
699 tcg_temp_free_ptr(a0);
703 static DisasJumpType op_vmr(DisasContext *s, DisasOps *o)
705 const uint8_t v1 = get_field(s, v1);
706 const uint8_t v2 = get_field(s, v2);
707 const uint8_t v3 = get_field(s, v3);
708 const uint8_t es = get_field(s, m4);
709 int dst_idx, src_idx;
713 gen_program_exception(s, PGM_SPECIFICATION);
714 return DISAS_NORETURN;
717 tmp = tcg_temp_new_i64();
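/* opcode 0x61 is VMRH (merge high), 0x60 is VMRL (merge low) */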
718 if (s->fields.op2 == 0x61) {
719 /* iterate backwards to avoid overwriting data we might need later */
720 for (dst_idx = NUM_VEC_ELEMENTS(es) - 1; dst_idx >= 0; dst_idx--) {
721 src_idx = dst_idx / 2;
722 if (dst_idx % 2 == 0) {
723 read_vec_element_i64(tmp, v2, src_idx, es);
725 read_vec_element_i64(tmp, v3, src_idx, es);
727 write_vec_element_i64(tmp, v1, dst_idx, es);
730 /* iterate forward to avoid overwriting data we might need later */
731 for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(es); dst_idx++) {
732 src_idx = (dst_idx + NUM_VEC_ELEMENTS(es)) / 2;
733 if (dst_idx % 2 == 0) {
734 read_vec_element_i64(tmp, v2, src_idx, es);
736 read_vec_element_i64(tmp, v3, src_idx, es);
738 write_vec_element_i64(tmp, v1, dst_idx, es);
741 tcg_temp_free_i64(tmp);
745 static DisasJumpType op_vpk(DisasContext *s, DisasOps *o)
747 const uint8_t v1 = get_field(s, v1);
748 const uint8_t v2 = get_field(s, v2);
749 const uint8_t v3 = get_field(s, v3);
750 const uint8_t es = get_field(s, m4);
751 static gen_helper_gvec_3 * const vpk[3] = {
752 gen_helper_gvec_vpk16,
753 gen_helper_gvec_vpk32,
754 gen_helper_gvec_vpk64,
756 static gen_helper_gvec_3 * const vpks[3] = {
757 gen_helper_gvec_vpks16,
758 gen_helper_gvec_vpks32,
759 gen_helper_gvec_vpks64,
761 static gen_helper_gvec_3_ptr * const vpks_cc[3] = {
762 gen_helper_gvec_vpks_cc16,
763 gen_helper_gvec_vpks_cc32,
764 gen_helper_gvec_vpks_cc64,
766 static gen_helper_gvec_3 * const vpkls[3] = {
767 gen_helper_gvec_vpkls16,
768 gen_helper_gvec_vpkls32,
769 gen_helper_gvec_vpkls64,
771 static gen_helper_gvec_3_ptr * const vpkls_cc[3] = {
772 gen_helper_gvec_vpkls_cc16,
773 gen_helper_gvec_vpkls_cc32,
774 gen_helper_gvec_vpkls_cc64,
777 if (es == ES_8 || es > ES_64) {
778 gen_program_exception(s, PGM_SPECIFICATION);
779 return DISAS_NORETURN;
782 switch (s->fields.op2) {
784 if (get_field(s, m5) & 0x1) {
785 gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpks_cc[es - 1]);
788 gen_gvec_3_ool(v1, v2, v3, 0, vpks[es - 1]);
792 if (get_field(s, m5) & 0x1) {
793 gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpkls_cc[es - 1]);
796 gen_gvec_3_ool(v1, v2, v3, 0, vpkls[es - 1]);
800 /* If sources and destination don't overlap -> fast path */
801 if (v1 != v2 && v1 != v3) {
802 const uint8_t src_es = get_field(s, m4);
803 const uint8_t dst_es = src_es - 1;
804 TCGv_i64 tmp = tcg_temp_new_i64();
805 int dst_idx, src_idx;
807 for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) {
809 if (src_idx < NUM_VEC_ELEMENTS(src_es)) {
810 read_vec_element_i64(tmp, v2, src_idx, src_es);
812 src_idx -= NUM_VEC_ELEMENTS(src_es);
813 read_vec_element_i64(tmp, v3, src_idx, src_es);
815 write_vec_element_i64(tmp, v1, dst_idx, dst_es);
817 tcg_temp_free_i64(tmp);
819 gen_gvec_3_ool(v1, v2, v3, 0, vpk[es - 1]);
823 g_assert_not_reached();
828 static DisasJumpType op_vperm(DisasContext *s, DisasOps *o)
830 gen_gvec_4_ool(get_field(s, v1), get_field(s, v2),
831 get_field(s, v3), get_field(s, v4),
832 0, gen_helper_gvec_vperm);
836 static DisasJumpType op_vpdi(DisasContext *s, DisasOps *o)
838 const uint8_t i2 = extract32(get_field(s, m4), 2, 1);
839 const uint8_t i3 = extract32(get_field(s, m4), 0, 1);
840 TCGv_i64 t0 = tcg_temp_new_i64();
841 TCGv_i64 t1 = tcg_temp_new_i64();
843 read_vec_element_i64(t0, get_field(s, v2), i2, ES_64);
844 read_vec_element_i64(t1, get_field(s, v3), i3, ES_64);
845 write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
846 write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
847 tcg_temp_free_i64(t0);
848 tcg_temp_free_i64(t1);
852 static DisasJumpType op_vrep(DisasContext *s, DisasOps *o)
854 const uint8_t enr = get_field(s, i2);
855 const uint8_t es = get_field(s, m4);
857 if (es > ES_64 || !valid_vec_element(enr, es)) {
858 gen_program_exception(s, PGM_SPECIFICATION);
859 return DISAS_NORETURN;
862 tcg_gen_gvec_dup_mem(es, vec_full_reg_offset(get_field(s, v1)),
863 vec_reg_offset(get_field(s, v3), enr, es),
868 static DisasJumpType op_vrepi(DisasContext *s, DisasOps *o)
870 const int64_t data = (int16_t)get_field(s, i2);
871 const uint8_t es = get_field(s, m3);
874 gen_program_exception(s, PGM_SPECIFICATION);
875 return DISAS_NORETURN;
878 gen_gvec_dup_imm(es, get_field(s, v1), data);
882 static DisasJumpType op_vsce(DisasContext *s, DisasOps *o)
884 const uint8_t es = s->insn->data;
885 const uint8_t enr = get_field(s, m3);
888 if (!valid_vec_element(enr, es)) {
889 gen_program_exception(s, PGM_SPECIFICATION);
890 return DISAS_NORETURN;
893 tmp = tcg_temp_new_i64();
894 read_vec_element_i64(tmp, get_field(s, v2), enr, es);
895 tcg_gen_add_i64(o->addr1, o->addr1, tmp);
896 gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0);
898 read_vec_element_i64(tmp, get_field(s, v1), enr, es);
899 tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
900 tcg_temp_free_i64(tmp);
904 static DisasJumpType op_vsel(DisasContext *s, DisasOps *o)
906 gen_gvec_fn_4(bitsel, ES_8, get_field(s, v1),
907 get_field(s, v4), get_field(s, v2),
912 static DisasJumpType op_vseg(DisasContext *s, DisasOps *o)
914 const uint8_t es = get_field(s, m3);
932 gen_program_exception(s, PGM_SPECIFICATION);
933 return DISAS_NORETURN;
936 tmp = tcg_temp_new_i64();
937 read_vec_element_i64(tmp, get_field(s, v2), idx1, es | MO_SIGN);
938 write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
939 read_vec_element_i64(tmp, get_field(s, v2), idx2, es | MO_SIGN);
940 write_vec_element_i64(tmp, get_field(s, v1), 1, ES_64);
941 tcg_temp_free_i64(tmp);
945 static DisasJumpType op_vst(DisasContext *s, DisasOps *o)
947 TCGv_i64 tmp = tcg_const_i64(16);
949 /* Probe write access before actually modifying memory */
950 gen_helper_probe_write_access(cpu_env, o->addr1, tmp);
952 read_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
953 tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
954 gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
955 read_vec_element_i64(tmp, get_field(s, v1), 1, ES_64);
956 tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
957 tcg_temp_free_i64(tmp);
961 static DisasJumpType op_vste(DisasContext *s, DisasOps *o)
963 const uint8_t es = s->insn->data;
964 const uint8_t enr = get_field(s, m3);
967 if (!valid_vec_element(enr, es)) {
968 gen_program_exception(s, PGM_SPECIFICATION);
969 return DISAS_NORETURN;
972 tmp = tcg_temp_new_i64();
973 read_vec_element_i64(tmp, get_field(s, v1), enr, es);
974 tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
975 tcg_temp_free_i64(tmp);
979 static DisasJumpType op_vstm(DisasContext *s, DisasOps *o)
981 const uint8_t v3 = get_field(s, v3);
982 uint8_t v1 = get_field(s, v1);
985 if (v3 < v1 || (v3 - v1 + 1) > 16) {
986 gen_program_exception(s, PGM_SPECIFICATION);
987 return DISAS_NORETURN;
990 /* Probe write access before actually modifying memory */
991 tmp = tcg_const_i64((v3 - v1 + 1) * 16);
992 gen_helper_probe_write_access(cpu_env, o->addr1, tmp);
995 read_vec_element_i64(tmp, v1, 0, ES_64);
996 tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
997 gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
998 read_vec_element_i64(tmp, v1, 1, ES_64);
999 tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
1003 gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
1005 tcg_temp_free_i64(tmp);
1009 static DisasJumpType op_vstl(DisasContext *s, DisasOps *o)
1011 const int v1_offs = vec_full_reg_offset(get_field(s, v1));
1012 TCGv_ptr a0 = tcg_temp_new_ptr();
1014 /* convert highest index into an actual length */
1015 tcg_gen_addi_i64(o->in2, o->in2, 1);
1016 tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
1017 gen_helper_vstl(cpu_env, a0, o->addr1, o->in2);
1018 tcg_temp_free_ptr(a0);
1022 static DisasJumpType op_vup(DisasContext *s, DisasOps *o)
1024 const bool logical = s->fields.op2 == 0xd4 || s->fields.op2 == 0xd5;
1025 const uint8_t v1 = get_field(s, v1);
1026 const uint8_t v2 = get_field(s, v2);
1027 const uint8_t src_es = get_field(s, m3);
1028 const uint8_t dst_es = src_es + 1;
1029 int dst_idx, src_idx;
1032 if (src_es > ES_32) {
1033 gen_program_exception(s, PGM_SPECIFICATION);
1034 return DISAS_NORETURN;
1037 tmp = tcg_temp_new_i64();
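/* 0xd7 (VUPH) and 0xd5 (VUPLH) expand the leftmost source elements */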
1038 if (s->fields.op2 == 0xd7 || s->fields.op2 == 0xd5) {
1039 /* iterate backwards to avoid overwriting data we might need later */
1040 for (dst_idx = NUM_VEC_ELEMENTS(dst_es) - 1; dst_idx >= 0; dst_idx--) {
1042 read_vec_element_i64(tmp, v2, src_idx,
1043 src_es | (logical ? 0 : MO_SIGN));
1044 write_vec_element_i64(tmp, v1, dst_idx, dst_es);
1048 /* iterate forward to avoid overwriting data we might need later */
1049 for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) {
1050 src_idx = dst_idx + NUM_VEC_ELEMENTS(src_es) / 2;
1051 read_vec_element_i64(tmp, v2, src_idx,
1052 src_es | (logical ? 0 : MO_SIGN));
1053 write_vec_element_i64(tmp, v1, dst_idx, dst_es);
1056 tcg_temp_free_i64(tmp);
1060 static DisasJumpType op_va(DisasContext *s, DisasOps *o)
1062 const uint8_t es = get_field(s, m4);
1065 gen_program_exception(s, PGM_SPECIFICATION);
1066 return DISAS_NORETURN;
1067 } else if (es == ES_128) {
1068 gen_gvec128_3_i64(tcg_gen_add2_i64, get_field(s, v1),
1069 get_field(s, v2), get_field(s, v3));
1072 gen_gvec_fn_3(add, es, get_field(s, v1), get_field(s, v2),
1077 static void gen_acc(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, uint8_t es)
1079 const uint8_t msb_bit_nr = NUM_VEC_ELEMENT_BITS(es) - 1;
1080 TCGv_i64 msb_mask = tcg_const_i64(dup_const(es, 1ull << msb_bit_nr));
1081 TCGv_i64 t1 = tcg_temp_new_i64();
1082 TCGv_i64 t2 = tcg_temp_new_i64();
1083 TCGv_i64 t3 = tcg_temp_new_i64();
1085 /* Calculate the carry into the MSB, ignoring the old MSBs */
1086 tcg_gen_andc_i64(t1, a, msb_mask);
1087 tcg_gen_andc_i64(t2, b, msb_mask);
1088 tcg_gen_add_i64(t1, t1, t2);
1089 /* Calculate the MSB without any carry into it */
1090 tcg_gen_xor_i64(t3, a, b);
1091 /* Calculate the carry out of the MSB in the MSB bit position */
1092 tcg_gen_and_i64(d, a, b);
1093 tcg_gen_and_i64(t1, t1, t3);
1094 tcg_gen_or_i64(d, d, t1);
1095 /* Isolate and shift the carry into position */
1096 tcg_gen_and_i64(d, d, msb_mask);
1097 tcg_gen_shri_i64(d, d, msb_bit_nr);
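/* each element of d now holds the carry-out (0 or 1) of its addition */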
1099 tcg_temp_free_i64(t1);
1100 tcg_temp_free_i64(t2);
1101 tcg_temp_free_i64(t3);
1104 static void gen_acc8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
1106 gen_acc(d, a, b, ES_8);
1109 static void gen_acc16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
1111 gen_acc(d, a, b, ES_16);
1114 static void gen_acc_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
1116 TCGv_i32 t = tcg_temp_new_i32();
1118 tcg_gen_add_i32(t, a, b);
1119 tcg_gen_setcond_i32(TCG_COND_LTU, d, t, b);
1120 tcg_temp_free_i32(t);
1123 static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
1125 TCGv_i64 t = tcg_temp_new_i64();
1127 tcg_gen_add_i64(t, a, b);
1128 tcg_gen_setcond_i64(TCG_COND_LTU, d, t, b);
1129 tcg_temp_free_i64(t);
1132 static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
1133 TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
1135 TCGv_i64 th = tcg_temp_new_i64();
1136 TCGv_i64 tl = tcg_temp_new_i64();
1137 TCGv_i64 zero = tcg_const_i64(0);
1139 tcg_gen_add2_i64(tl, th, al, zero, bl, zero);
1140 tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
1141 tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
1142 tcg_gen_mov_i64(dh, zero);
1144 tcg_temp_free_i64(th);
1145 tcg_temp_free_i64(tl);
1146 tcg_temp_free_i64(zero);
1149 static DisasJumpType op_vacc(DisasContext *s, DisasOps *o)
1151 const uint8_t es = get_field(s, m4);
1152 static const GVecGen3 g[4] = {
1153 { .fni8 = gen_acc8_i64, },
1154 { .fni8 = gen_acc16_i64, },
1155 { .fni4 = gen_acc_i32, },
1156 { .fni8 = gen_acc_i64, },
1160 gen_program_exception(s, PGM_SPECIFICATION);
1161 return DISAS_NORETURN;
1162 } else if (es == ES_128) {
1163 gen_gvec128_3_i64(gen_acc2_i64, get_field(s, v1),
1164 get_field(s, v2), get_field(s, v3));
1167 gen_gvec_3(get_field(s, v1), get_field(s, v2),
1168 get_field(s, v3), &g[es]);
1172 static void gen_ac2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
1173 TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
1175 TCGv_i64 tl = tcg_temp_new_i64();
1176 TCGv_i64 th = tcg_const_i64(0);
1178 /* extract the carry only */
1179 tcg_gen_extract_i64(tl, cl, 0, 1);
1180 tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
1181 tcg_gen_add2_i64(dl, dh, dl, dh, tl, th);
1183 tcg_temp_free_i64(tl);
1184 tcg_temp_free_i64(th);
1187 static DisasJumpType op_vac(DisasContext *s, DisasOps *o)
1189 if (get_field(s, m5) != ES_128) {
1190 gen_program_exception(s, PGM_SPECIFICATION);
1191 return DISAS_NORETURN;
1194 gen_gvec128_4_i64(gen_ac2_i64, get_field(s, v1),
1195 get_field(s, v2), get_field(s, v3),
1200 static void gen_accc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
1201 TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
1203 TCGv_i64 tl = tcg_temp_new_i64();
1204 TCGv_i64 th = tcg_temp_new_i64();
1205 TCGv_i64 zero = tcg_const_i64(0);
1207 tcg_gen_andi_i64(tl, cl, 1);
1208 tcg_gen_add2_i64(tl, th, tl, zero, al, zero);
1209 tcg_gen_add2_i64(tl, th, tl, th, bl, zero);
1210 tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
1211 tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
1212 tcg_gen_mov_i64(dh, zero);
1214 tcg_temp_free_i64(tl);
1215 tcg_temp_free_i64(th);
1216 tcg_temp_free_i64(zero);
1219 static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o)
1221 if (get_field(s, m5) != ES_128) {
1222 gen_program_exception(s, PGM_SPECIFICATION);
1223 return DISAS_NORETURN;
1226 gen_gvec128_4_i64(gen_accc2_i64, get_field(s, v1),
1227 get_field(s, v2), get_field(s, v3),
1232 static DisasJumpType op_vn(DisasContext *s, DisasOps *o)
1234 gen_gvec_fn_3(and, ES_8, get_field(s, v1), get_field(s, v2),
1239 static DisasJumpType op_vnc(DisasContext *s, DisasOps *o)
1241 gen_gvec_fn_3(andc, ES_8, get_field(s, v1),
1242 get_field(s, v2), get_field(s, v3));
1246 static void gen_avg_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
1248 TCGv_i64 t0 = tcg_temp_new_i64();
1249 TCGv_i64 t1 = tcg_temp_new_i64();
1251 tcg_gen_ext_i32_i64(t0, a);
1252 tcg_gen_ext_i32_i64(t1, b);
1253 tcg_gen_add_i64(t0, t0, t1);
1254 tcg_gen_addi_i64(t0, t0, 1);
1255 tcg_gen_shri_i64(t0, t0, 1);
1256 tcg_gen_extrl_i64_i32(d, t0);
1262 static void gen_avg_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
1264 TCGv_i64 dh = tcg_temp_new_i64();
1265 TCGv_i64 ah = tcg_temp_new_i64();
1266 TCGv_i64 bh = tcg_temp_new_i64();
1268 /* extending the sign by one bit is sufficient */
1269 tcg_gen_extract_i64(ah, al, 63, 1);
1270 tcg_gen_extract_i64(bh, bl, 63, 1);
1271 tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
1272 gen_addi2_i64(dl, dh, dl, dh, 1);
1273 tcg_gen_extract2_i64(dl, dl, dh, 1);
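/* the extract2 shifts the 65 bit sum right by one: dl = (dh:dl) >> 1 */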
1275 tcg_temp_free_i64(dh);
1276 tcg_temp_free_i64(ah);
1277 tcg_temp_free_i64(bh);
1280 static DisasJumpType op_vavg(DisasContext *s, DisasOps *o)
1282 const uint8_t es = get_field(s, m4);
1283 static const GVecGen3 g[4] = {
1284 { .fno = gen_helper_gvec_vavg8, },
1285 { .fno = gen_helper_gvec_vavg16, },
1286 { .fni4 = gen_avg_i32, },
1287 { .fni8 = gen_avg_i64, },
1291 gen_program_exception(s, PGM_SPECIFICATION);
1292 return DISAS_NORETURN;
1294 gen_gvec_3(get_field(s, v1), get_field(s, v2),
1295 get_field(s, v3), &g[es]);
1299 static void gen_avgl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
1301 TCGv_i64 t0 = tcg_temp_new_i64();
1302 TCGv_i64 t1 = tcg_temp_new_i64();
1304 tcg_gen_extu_i32_i64(t0, a);
1305 tcg_gen_extu_i32_i64(t1, b);
1306 tcg_gen_add_i64(t0, t0, t1);
1307 tcg_gen_addi_i64(t0, t0, 1);
1308 tcg_gen_shri_i64(t0, t0, 1);
1309 tcg_gen_extrl_i64_i32(d, t0);
1315 static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
1317 TCGv_i64 dh = tcg_temp_new_i64();
1318 TCGv_i64 zero = tcg_const_i64(0);
1320 tcg_gen_add2_i64(dl, dh, al, zero, bl, zero);
1321 gen_addi2_i64(dl, dh, dl, dh, 1);
1322 tcg_gen_extract2_i64(dl, dl, dh, 1);
1324 tcg_temp_free_i64(dh);
1325 tcg_temp_free_i64(zero);
1328 static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o)
1330 const uint8_t es = get_field(s, m4);
1331 static const GVecGen3 g[4] = {
1332 { .fno = gen_helper_gvec_vavgl8, },
1333 { .fno = gen_helper_gvec_vavgl16, },
1334 { .fni4 = gen_avgl_i32, },
1335 { .fni8 = gen_avgl_i64, },
1339 gen_program_exception(s, PGM_SPECIFICATION);
1340 return DISAS_NORETURN;
1342 gen_gvec_3(get_field(s, v1), get_field(s, v2),
1343 get_field(s, v3), &g[es]);
1347 static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o)
1349 TCGv_i32 tmp = tcg_temp_new_i32();
1350 TCGv_i32 sum = tcg_temp_new_i32();
1353 read_vec_element_i32(sum, get_field(s, v3), 1, ES_32);
1354 for (i = 0; i < 4; i++) {
1355 read_vec_element_i32(tmp, get_field(s, v2), i, ES_32);
1356 tcg_gen_add2_i32(tmp, sum, sum, sum, tmp, tmp);
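/*
 * Passing sum as both the low and the high input parts makes the add2
 * compute sum + tmp + carry(sum + tmp), i.e. the end-around carry add
 * the checksum requires.
 */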
1358 gen_gvec_dup_imm(ES_32, get_field(s, v1), 0);
1359 write_vec_element_i32(sum, get_field(s, v1), 1, ES_32);
1361 tcg_temp_free_i32(tmp);
1362 tcg_temp_free_i32(sum);
1366 static DisasJumpType op_vec(DisasContext *s, DisasOps *o)
1368 uint8_t es = get_field(s, m3);
1369 const uint8_t enr = NUM_VEC_ELEMENTS(es) / 2 - 1;
1372 gen_program_exception(s, PGM_SPECIFICATION);
1373 return DISAS_NORETURN;
1375 if (s->fields.op2 == 0xdb) {
1379 o->in1 = tcg_temp_new_i64();
1380 o->in2 = tcg_temp_new_i64();
1381 read_vec_element_i64(o->in1, get_field(s, v1), enr, es);
1382 read_vec_element_i64(o->in2, get_field(s, v2), enr, es);
1386 static DisasJumpType op_vc(DisasContext *s, DisasOps *o)
1388 const uint8_t es = get_field(s, m4);
1389 TCGCond cond = s->insn->data;
1392 gen_program_exception(s, PGM_SPECIFICATION);
1393 return DISAS_NORETURN;
1396 tcg_gen_gvec_cmp(cond, es,
1397 vec_full_reg_offset(get_field(s, v1)),
1398 vec_full_reg_offset(get_field(s, v2)),
1399 vec_full_reg_offset(get_field(s, v3)), 16, 16);
1400 if (get_field(s, m5) & 0x1) {
1401 TCGv_i64 low = tcg_temp_new_i64();
1402 TCGv_i64 high = tcg_temp_new_i64();
1404 read_vec_element_i64(high, get_field(s, v1), 0, ES_64);
1405 read_vec_element_i64(low, get_field(s, v1), 1, ES_64);
1406 gen_op_update2_cc_i64(s, CC_OP_VC, low, high);
1408 tcg_temp_free_i64(low);
1409 tcg_temp_free_i64(high);
1414 static void gen_clz_i32(TCGv_i32 d, TCGv_i32 a)
1416 tcg_gen_clzi_i32(d, a, 32);
1419 static void gen_clz_i64(TCGv_i64 d, TCGv_i64 a)
1421 tcg_gen_clzi_i64(d, a, 64);
1424 static DisasJumpType op_vclz(DisasContext *s, DisasOps *o)
1426 const uint8_t es = get_field(s, m3);
1427 static const GVecGen2 g[4] = {
1428 { .fno = gen_helper_gvec_vclz8, },
1429 { .fno = gen_helper_gvec_vclz16, },
1430 { .fni4 = gen_clz_i32, },
1431 { .fni8 = gen_clz_i64, },
1435 gen_program_exception(s, PGM_SPECIFICATION);
1436 return DISAS_NORETURN;
1438 gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]);
1442 static void gen_ctz_i32(TCGv_i32 d, TCGv_i32 a)
1444 tcg_gen_ctzi_i32(d, a, 32);
1447 static void gen_ctz_i64(TCGv_i64 d, TCGv_i64 a)
1449 tcg_gen_ctzi_i64(d, a, 64);
1452 static DisasJumpType op_vctz(DisasContext *s, DisasOps *o)
1454 const uint8_t es = get_field(s, m3);
1455 static const GVecGen2 g[4] = {
1456 { .fno = gen_helper_gvec_vctz8, },
1457 { .fno = gen_helper_gvec_vctz16, },
1458 { .fni4 = gen_ctz_i32, },
1459 { .fni8 = gen_ctz_i64, },
1463 gen_program_exception(s, PGM_SPECIFICATION);
1464 return DISAS_NORETURN;
1466 gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]);
1470 static DisasJumpType op_vx(DisasContext *s, DisasOps *o)
1472 gen_gvec_fn_3(xor, ES_8, get_field(s, v1), get_field(s, v2),
1477 static DisasJumpType op_vgfm(DisasContext *s, DisasOps *o)
1479 const uint8_t es = get_field(s, m4);
1480 static const GVecGen3 g[4] = {
1481 { .fno = gen_helper_gvec_vgfm8, },
1482 { .fno = gen_helper_gvec_vgfm16, },
1483 { .fno = gen_helper_gvec_vgfm32, },
1484 { .fno = gen_helper_gvec_vgfm64, },
1488 gen_program_exception(s, PGM_SPECIFICATION);
1489 return DISAS_NORETURN;
1491 gen_gvec_3(get_field(s, v1), get_field(s, v2),
1492 get_field(s, v3), &g[es]);
1496 static DisasJumpType op_vgfma(DisasContext *s, DisasOps *o)
1498 const uint8_t es = get_field(s, m5);
1499 static const GVecGen4 g[4] = {
1500 { .fno = gen_helper_gvec_vgfma8, },
1501 { .fno = gen_helper_gvec_vgfma16, },
1502 { .fno = gen_helper_gvec_vgfma32, },
1503 { .fno = gen_helper_gvec_vgfma64, },
1507 gen_program_exception(s, PGM_SPECIFICATION);
1508 return DISAS_NORETURN;
1510 gen_gvec_4(get_field(s, v1), get_field(s, v2),
1511 get_field(s, v3), get_field(s, v4), &g[es]);
1515 static DisasJumpType op_vlc(DisasContext *s, DisasOps *o)
1517 const uint8_t es = get_field(s, m3);
1520 gen_program_exception(s, PGM_SPECIFICATION);
1521 return DISAS_NORETURN;
1524 gen_gvec_fn_2(neg, es, get_field(s, v1), get_field(s, v2));
1528 static DisasJumpType op_vlp(DisasContext *s, DisasOps *o)
1530 const uint8_t es = get_field(s, m3);
1533 gen_program_exception(s, PGM_SPECIFICATION);
1534 return DISAS_NORETURN;
1537 gen_gvec_fn_2(abs, es, get_field(s, v1), get_field(s, v2));
1541 static DisasJumpType op_vmx(DisasContext *s, DisasOps *o)
1543 const uint8_t v1 = get_field(s, v1);
1544 const uint8_t v2 = get_field(s, v2);
1545 const uint8_t v3 = get_field(s, v3);
1546 const uint8_t es = get_field(s, m4);
1549 gen_program_exception(s, PGM_SPECIFICATION);
1550 return DISAS_NORETURN;
1553 switch (s->fields.op2) {
1555 gen_gvec_fn_3(smax, es, v1, v2, v3);
1558 gen_gvec_fn_3(umax, es, v1, v2, v3);
1561 gen_gvec_fn_3(smin, es, v1, v2, v3);
1564 gen_gvec_fn_3(umin, es, v1, v2, v3);
1567 g_assert_not_reached();
1572 static void gen_mal_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
1574 TCGv_i32 t0 = tcg_temp_new_i32();
1576 tcg_gen_mul_i32(t0, a, b);
1577 tcg_gen_add_i32(d, t0, c);
1579 tcg_temp_free_i32(t0);
1582 static void gen_mah_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
1584 TCGv_i64 t0 = tcg_temp_new_i64();
1585 TCGv_i64 t1 = tcg_temp_new_i64();
1586 TCGv_i64 t2 = tcg_temp_new_i64();
1588 tcg_gen_ext_i32_i64(t0, a);
1589 tcg_gen_ext_i32_i64(t1, b);
1590 tcg_gen_ext_i32_i64(t2, c);
1591 tcg_gen_mul_i64(t0, t0, t1);
1592 tcg_gen_add_i64(t0, t0, t2);
1593 tcg_gen_extrh_i64_i32(d, t0);
1600 static void gen_malh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
1602 TCGv_i64 t0 = tcg_temp_new_i64();
1603 TCGv_i64 t1 = tcg_temp_new_i64();
1604 TCGv_i64 t2 = tcg_temp_new_i64();
1606 tcg_gen_extu_i32_i64(t0, a);
1607 tcg_gen_extu_i32_i64(t1, b);
1608 tcg_gen_extu_i32_i64(t2, c);
1609 tcg_gen_mul_i64(t0, t0, t1);
1610 tcg_gen_add_i64(t0, t0, t2);
1611 tcg_gen_extrh_i64_i32(d, t0);
1618 static DisasJumpType op_vma(DisasContext *s, DisasOps *o)
1620 const uint8_t es = get_field(s, m5);
1621 static const GVecGen4 g_vmal[3] = {
1622 { .fno = gen_helper_gvec_vmal8, },
1623 { .fno = gen_helper_gvec_vmal16, },
1624 { .fni4 = gen_mal_i32, },
1626 static const GVecGen4 g_vmah[3] = {
1627 { .fno = gen_helper_gvec_vmah8, },
1628 { .fno = gen_helper_gvec_vmah16, },
1629 { .fni4 = gen_mah_i32, },
1631 static const GVecGen4 g_vmalh[3] = {
1632 { .fno = gen_helper_gvec_vmalh8, },
1633 { .fno = gen_helper_gvec_vmalh16, },
1634 { .fni4 = gen_malh_i32, },
1636 static const GVecGen4 g_vmae[3] = {
1637 { .fno = gen_helper_gvec_vmae8, },
1638 { .fno = gen_helper_gvec_vmae16, },
1639 { .fno = gen_helper_gvec_vmae32, },
1641 static const GVecGen4 g_vmale[3] = {
1642 { .fno = gen_helper_gvec_vmale8, },
1643 { .fno = gen_helper_gvec_vmale16, },
1644 { .fno = gen_helper_gvec_vmale32, },
1646 static const GVecGen4 g_vmao[3] = {
1647 { .fno = gen_helper_gvec_vmao8, },
1648 { .fno = gen_helper_gvec_vmao16, },
1649 { .fno = gen_helper_gvec_vmao32, },
1651 static const GVecGen4 g_vmalo[3] = {
1652 { .fno = gen_helper_gvec_vmalo8, },
1653 { .fno = gen_helper_gvec_vmalo16, },
1654 { .fno = gen_helper_gvec_vmalo32, },
1659 gen_program_exception(s, PGM_SPECIFICATION);
1660 return DISAS_NORETURN;
1663 switch (s->fields.op2) {
1686 g_assert_not_reached();
1689 gen_gvec_4(get_field(s, v1), get_field(s, v2),
1690 get_field(s, v3), get_field(s, v4), fn);
1694 static void gen_mh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
1696 TCGv_i32 t = tcg_temp_new_i32();
1698 tcg_gen_muls2_i32(t, d, a, b);
1699 tcg_temp_free_i32(t);
1702 static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
1704 TCGv_i32 t = tcg_temp_new_i32();
1706 tcg_gen_mulu2_i32(t, d, a, b);
1707 tcg_temp_free_i32(t);
1710 static DisasJumpType op_vm(DisasContext *s, DisasOps *o)
1712 const uint8_t es = get_field(s, m4);
1713 static const GVecGen3 g_vmh[3] = {
1714 { .fno = gen_helper_gvec_vmh8, },
1715 { .fno = gen_helper_gvec_vmh16, },
1716 { .fni4 = gen_mh_i32, },
1718 static const GVecGen3 g_vmlh[3] = {
1719 { .fno = gen_helper_gvec_vmlh8, },
1720 { .fno = gen_helper_gvec_vmlh16, },
1721 { .fni4 = gen_mlh_i32, },
1723 static const GVecGen3 g_vme[3] = {
1724 { .fno = gen_helper_gvec_vme8, },
1725 { .fno = gen_helper_gvec_vme16, },
1726 { .fno = gen_helper_gvec_vme32, },
1728 static const GVecGen3 g_vmle[3] = {
1729 { .fno = gen_helper_gvec_vmle8, },
1730 { .fno = gen_helper_gvec_vmle16, },
1731 { .fno = gen_helper_gvec_vmle32, },
1733 static const GVecGen3 g_vmo[3] = {
1734 { .fno = gen_helper_gvec_vmo8, },
1735 { .fno = gen_helper_gvec_vmo16, },
1736 { .fno = gen_helper_gvec_vmo32, },
1738 static const GVecGen3 g_vmlo[3] = {
1739 { .fno = gen_helper_gvec_vmlo8, },
1740 { .fno = gen_helper_gvec_vmlo16, },
1741 { .fno = gen_helper_gvec_vmlo32, },
1746 gen_program_exception(s, PGM_SPECIFICATION);
1747 return DISAS_NORETURN;
1750 switch (s->fields.op2) {
1752 gen_gvec_fn_3(mul, es, get_field(s, v1),
1753 get_field(s, v2), get_field(s, v3));
1774 g_assert_not_reached();
1777 gen_gvec_3(get_field(s, v1), get_field(s, v2),
1778 get_field(s, v3), fn);
1782 static DisasJumpType op_vmsl(DisasContext *s, DisasOps *o)
1784 TCGv_i64 l1, h1, l2, h2;
1786 if (get_field(s, m4) != ES_64) {
1787 gen_program_exception(s, PGM_SPECIFICATION);
1788 return DISAS_NORETURN;
1791 l1 = tcg_temp_new_i64();
1792 h1 = tcg_temp_new_i64();
1793 l2 = tcg_temp_new_i64();
1794 h2 = tcg_temp_new_i64();
1796 /* Multiply both even elements from v2 and v3 */
1797 read_vec_element_i64(l1, get_field(s, v2), 0, ES_64);
1798 read_vec_element_i64(h1, get_field(s, v3), 0, ES_64);
1799 tcg_gen_mulu2_i64(l1, h1, l1, h1);
1800 /* Shift result left by one (x2) if requested */
1801 if (extract32(get_field(s, m6), 3, 1)) {
1802 tcg_gen_add2_i64(l1, h1, l1, h1, l1, h1);
1805 /* Multiply both odd elements from v2 and v3 */
1806 read_vec_element_i64(l2, get_field(s, v2), 1, ES_64);
1807 read_vec_element_i64(h2, get_field(s, v3), 1, ES_64);
1808 tcg_gen_mulu2_i64(l2, h2, l2, h2);
1809 /* Shift result left by one (x2) if requested */
1810 if (extract32(get_field(s, m6), 2, 1)) {
1811 tcg_gen_add2_i64(l2, h2, l2, h2, l2, h2);
1814 /* Add both intermediate results */
1815 tcg_gen_add2_i64(l1, h1, l1, h1, l2, h2);
1817 read_vec_element_i64(h2, get_field(s, v4), 0, ES_64);
1818 read_vec_element_i64(l2, get_field(s, v4), 1, ES_64);
1819 tcg_gen_add2_i64(l1, h1, l1, h1, l2, h2);
1821 /* Store final result into v1. */
1822 write_vec_element_i64(h1, get_field(s, v1), 0, ES_64);
1823 write_vec_element_i64(l1, get_field(s, v1), 1, ES_64);
1825 tcg_temp_free_i64(l1);
1826 tcg_temp_free_i64(h1);
1827 tcg_temp_free_i64(l2);
1828 tcg_temp_free_i64(h2);
1832 static DisasJumpType op_vnn(DisasContext *s, DisasOps *o)
1834 gen_gvec_fn_3(nand, ES_8, get_field(s, v1),
1835 get_field(s, v2), get_field(s, v3));
1839 static DisasJumpType op_vno(DisasContext *s, DisasOps *o)
1841 gen_gvec_fn_3(nor, ES_8, get_field(s, v1), get_field(s, v2),
1846 static DisasJumpType op_vnx(DisasContext *s, DisasOps *o)
1848 gen_gvec_fn_3(eqv, ES_8, get_field(s, v1), get_field(s, v2),
1853 static DisasJumpType op_vo(DisasContext *s, DisasOps *o)
1855 gen_gvec_fn_3(or, ES_8, get_field(s, v1), get_field(s, v2),
1860 static DisasJumpType op_voc(DisasContext *s, DisasOps *o)
1862 gen_gvec_fn_3(orc, ES_8, get_field(s, v1), get_field(s, v2),
1867 static DisasJumpType op_vpopct(DisasContext *s, DisasOps *o)
1869 const uint8_t es = get_field(s, m3);
1870 static const GVecGen2 g[4] = {
1871 { .fno = gen_helper_gvec_vpopct8, },
1872 { .fno = gen_helper_gvec_vpopct16, },
1873 { .fni4 = tcg_gen_ctpop_i32, },
1874 { .fni8 = tcg_gen_ctpop_i64, },
1877 if (es > ES_64 || (es != ES_8 && !s390_has_feat(S390_FEAT_VECTOR_ENH))) {
1878 gen_program_exception(s, PGM_SPECIFICATION);
1879 return DISAS_NORETURN;
1882 gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]);
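/* d = (rotl(a, c) & b) | (d & ~b): rotate a, then merge into d under mask b */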
1886 static void gen_rim_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, int32_t c)
1888 TCGv_i32 t = tcg_temp_new_i32();
1890 tcg_gen_rotli_i32(t, a, c & 31);
1891 tcg_gen_and_i32(t, t, b);
1892 tcg_gen_andc_i32(d, d, b);
1893 tcg_gen_or_i32(d, d, t);
1895 tcg_temp_free_i32(t);
1898 static void gen_rim_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c)
1900 TCGv_i64 t = tcg_temp_new_i64();
1902 tcg_gen_rotli_i64(t, a, c & 63);
1903 tcg_gen_and_i64(t, t, b);
1904 tcg_gen_andc_i64(d, d, b);
1905 tcg_gen_or_i64(d, d, t);
1907 tcg_temp_free_i64(t);
1910 static DisasJumpType op_verim(DisasContext *s, DisasOps *o)
1912 const uint8_t es = get_field(s, m5);
1913 const uint8_t i4 = get_field(s, i4) &
1914 (NUM_VEC_ELEMENT_BITS(es) - 1);
1915 static const GVecGen3i g[4] = {
1916 { .fno = gen_helper_gvec_verim8, },
1917 { .fno = gen_helper_gvec_verim16, },
1918 { .fni4 = gen_rim_i32,
1919 .load_dest = true, },
1920 { .fni8 = gen_rim_i64,
1921 .load_dest = true, },
1925 gen_program_exception(s, PGM_SPECIFICATION);
1926 return DISAS_NORETURN;
1929 gen_gvec_3i(get_field(s, v1), get_field(s, v2),
1930 get_field(s, v3), i4, &g[es]);
1934 static DisasJumpType op_vesv(DisasContext *s, DisasOps *o)
1936 const uint8_t es = get_field(s, m4);
1937 const uint8_t v1 = get_field(s, v1);
1938 const uint8_t v2 = get_field(s, v2);
1939 const uint8_t v3 = get_field(s, v3);
1942 gen_program_exception(s, PGM_SPECIFICATION);
1943 return DISAS_NORETURN;
1946 switch (s->fields.op2) {
1948 gen_gvec_fn_3(shlv, es, v1, v2, v3);
1951 gen_gvec_fn_3(rotlv, es, v1, v2, v3);
1954 gen_gvec_fn_3(sarv, es, v1, v2, v3);
1957 gen_gvec_fn_3(shrv, es, v1, v2, v3);
1960 g_assert_not_reached();
1965 static DisasJumpType op_ves(DisasContext *s, DisasOps *o)
1967 const uint8_t es = get_field(s, m4);
1968 const uint8_t d2 = get_field(s, d2) &
1969 (NUM_VEC_ELEMENT_BITS(es) - 1);
1970 const uint8_t v1 = get_field(s, v1);
1971 const uint8_t v3 = get_field(s, v3);
1975 gen_program_exception(s, PGM_SPECIFICATION);
1976 return DISAS_NORETURN;
1979 if (likely(!get_field(s, b2))) {
1980 switch (s->fields.op2) {
1982 gen_gvec_fn_2i(shli, es, v1, v3, d2);
1985 gen_gvec_fn_2i(rotli, es, v1, v3, d2);
1988 gen_gvec_fn_2i(sari, es, v1, v3, d2);
1991 gen_gvec_fn_2i(shri, es, v1, v3, d2);
1994 g_assert_not_reached();
1997 shift = tcg_temp_new_i32();
1998 tcg_gen_extrl_i64_i32(shift, o->addr1);
1999 tcg_gen_andi_i32(shift, shift, NUM_VEC_ELEMENT_BITS(es) - 1);
2000 switch (s->fields.op2) {
2002 gen_gvec_fn_2s(shls, es, v1, v3, shift);
2005 gen_gvec_fn_2s(rotls, es, v1, v3, shift);
2008 gen_gvec_fn_2s(sars, es, v1, v3, shift);
2011 gen_gvec_fn_2s(shrs, es, v1, v3, shift);
2014 g_assert_not_reached();
2016 tcg_temp_free_i32(shift);
2021 static DisasJumpType op_vsl(DisasContext *s, DisasOps *o)
2023 TCGv_i64 shift = tcg_temp_new_i64();
2025 read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
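/* VSL (0x74) consumes the bit part of the shift count, VSLB the byte part */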
2026 if (s->fields.op2 == 0x74) {
2027 tcg_gen_andi_i64(shift, shift, 0x7);
2029 tcg_gen_andi_i64(shift, shift, 0x78);
2032 gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2),
2033 shift, 0, gen_helper_gvec_vsl);
2034 tcg_temp_free_i64(shift);
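/*
 * Shift the 256 bit concatenation of v2 and v3 left by i4 bytes and keep
 * the leftmost 16 bytes; three consecutive source doublewords, stitched
 * together with extract2, are sufficient input.
 */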
2038 static DisasJumpType op_vsldb(DisasContext *s, DisasOps *o)
2040 const uint8_t i4 = get_field(s, i4) & 0xf;
2041 const int left_shift = (i4 & 7) * 8;
2042 const int right_shift = 64 - left_shift;
2043 TCGv_i64 t0 = tcg_temp_new_i64();
2044 TCGv_i64 t1 = tcg_temp_new_i64();
2045 TCGv_i64 t2 = tcg_temp_new_i64();
2047 if ((i4 & 8) == 0) {
2048 read_vec_element_i64(t0, get_field(s, v2), 0, ES_64);
2049 read_vec_element_i64(t1, get_field(s, v2), 1, ES_64);
2050 read_vec_element_i64(t2, get_field(s, v3), 0, ES_64);
2052 read_vec_element_i64(t0, get_field(s, v2), 1, ES_64);
2053 read_vec_element_i64(t1, get_field(s, v3), 0, ES_64);
2054 read_vec_element_i64(t2, get_field(s, v3), 1, ES_64);
2056 tcg_gen_extract2_i64(t0, t1, t0, right_shift);
2057 tcg_gen_extract2_i64(t1, t2, t1, right_shift);
2058 write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
2059 write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
2067 static DisasJumpType op_vsra(DisasContext *s, DisasOps *o)
2069 TCGv_i64 shift = tcg_temp_new_i64();
2071 read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
2072 if (s->fields.op2 == 0x7e) {
2073 tcg_gen_andi_i64(shift, shift, 0x7);
2075 tcg_gen_andi_i64(shift, shift, 0x78);
2078 gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2),
2079 shift, 0, gen_helper_gvec_vsra);
2080 tcg_temp_free_i64(shift);
2084 static DisasJumpType op_vsrl(DisasContext *s, DisasOps *o)
2086 TCGv_i64 shift = tcg_temp_new_i64();
2088 read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
2089 if (s->fields.op2 == 0x7c) {
2090 tcg_gen_andi_i64(shift, shift, 0x7);
2092 tcg_gen_andi_i64(shift, shift, 0x78);
2095 gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2),
2096 shift, 0, gen_helper_gvec_vsrl);
2097 tcg_temp_free_i64(shift);
2101 static DisasJumpType op_vs(DisasContext *s, DisasOps *o)
2103 const uint8_t es = get_field(s, m4);
2106 gen_program_exception(s, PGM_SPECIFICATION);
2107 return DISAS_NORETURN;
2108 } else if (es == ES_128) {
2109 gen_gvec128_3_i64(tcg_gen_sub2_i64, get_field(s, v1),
2110 get_field(s, v2), get_field(s, v3));
2113 gen_gvec_fn_3(sub, es, get_field(s, v1), get_field(s, v2),
2118 static void gen_scbi_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
2120 tcg_gen_setcond_i32(TCG_COND_GEU, d, a, b);
2123 static void gen_scbi_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
2125 tcg_gen_setcond_i64(TCG_COND_GEU, d, a, b);
2128 static void gen_scbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
2129 TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
2131 TCGv_i64 th = tcg_temp_new_i64();
2132 TCGv_i64 tl = tcg_temp_new_i64();
2133 TCGv_i64 zero = tcg_const_i64(0);
2135 tcg_gen_sub2_i64(tl, th, al, zero, bl, zero);
2136 tcg_gen_andi_i64(th, th, 1);
2137 tcg_gen_sub2_i64(tl, th, ah, zero, th, zero);
2138 tcg_gen_sub2_i64(tl, th, tl, th, bh, zero);
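/* th is now -1 if a borrow occurred, 0 otherwise */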
2139 /* "invert" the result: -1 -> 0; 0 -> 1 */
2140 tcg_gen_addi_i64(dl, th, 1);
2141 tcg_gen_mov_i64(dh, zero);
2143 tcg_temp_free_i64(th);
2144 tcg_temp_free_i64(tl);
2145 tcg_temp_free_i64(zero);
2148 static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o)
2150 const uint8_t es = get_field(s, m4);
2151 static const GVecGen3 g[4] = {
2152 { .fno = gen_helper_gvec_vscbi8, },
2153 { .fno = gen_helper_gvec_vscbi16, },
2154 { .fni4 = gen_scbi_i32, },
2155 { .fni8 = gen_scbi_i64, },
2159 gen_program_exception(s, PGM_SPECIFICATION);
2160 return DISAS_NORETURN;
2161 } else if (es == ES_128) {
2162 gen_gvec128_3_i64(gen_scbi2_i64, get_field(s, v1),
2163 get_field(s, v2), get_field(s, v3));
2166 gen_gvec_3(get_field(s, v1), get_field(s, v2),
2167 get_field(s, v3), &g[es]);
2171 static void gen_sbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
2172 TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
2174 TCGv_i64 tl = tcg_temp_new_i64();
2175 TCGv_i64 th = tcg_temp_new_i64();
2177 tcg_gen_not_i64(tl, bl);
2178 tcg_gen_not_i64(th, bh);
2179 gen_ac2_i64(dl, dh, al, ah, tl, th, cl, ch);
2180 tcg_temp_free_i64(tl);
2181 tcg_temp_free_i64(th);
2184 static DisasJumpType op_vsbi(DisasContext *s, DisasOps *o)
2186 if (get_field(s, m5) != ES_128) {
2187 gen_program_exception(s, PGM_SPECIFICATION);
2188 return DISAS_NORETURN;
2191 gen_gvec128_4_i64(gen_sbi2_i64, get_field(s, v1),
2192 get_field(s, v2), get_field(s, v3),
2197 static void gen_sbcbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
2198 TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
2200 TCGv_i64 th = tcg_temp_new_i64();
2201 TCGv_i64 tl = tcg_temp_new_i64();
2203 tcg_gen_not_i64(tl, bl);
2204 tcg_gen_not_i64(th, bh);
2205 gen_accc2_i64(dl, dh, al, ah, tl, th, cl, ch);
2207 tcg_temp_free_i64(tl);
2208 tcg_temp_free_i64(th);
2211 static DisasJumpType op_vsbcbi(DisasContext *s, DisasOps *o)
2213 if (get_field(s, m5) != ES_128) {
2214 gen_program_exception(s, PGM_SPECIFICATION);
2215 return DISAS_NORETURN;
2218 gen_gvec128_4_i64(gen_sbcbi2_i64, get_field(s, v1),
2219 get_field(s, v2), get_field(s, v3),
2224 static DisasJumpType op_vsumg(DisasContext *s, DisasOps *o)
2226 const uint8_t es = get_field(s, m4);
2230 if (es == ES_8 || es > ES_32) {
2231 gen_program_exception(s, PGM_SPECIFICATION);
2232 return DISAS_NORETURN;
2235 sum = tcg_temp_new_i64();
2236 tmp = tcg_temp_new_i64();
2237 for (dst_idx = 0; dst_idx < 2; dst_idx++) {
2238 uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 2;
2239 const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 2 - 1;
2241 read_vec_element_i64(sum, get_field(s, v3), max_idx, es);
2242 for (; idx <= max_idx; idx++) {
2243 read_vec_element_i64(tmp, get_field(s, v2), idx, es);
2244 tcg_gen_add_i64(sum, sum, tmp);
2246 write_vec_element_i64(sum, get_field(s, v1), dst_idx, ES_64);
2248 tcg_temp_free_i64(sum);
2249 tcg_temp_free_i64(tmp);
2253 static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o)
2255 const uint8_t es = get_field(s, m4);
2256 const uint8_t max_idx = NUM_VEC_ELEMENTS(es) - 1;
2257 TCGv_i64 sumh, suml, zero, tmpl;
2260 if (es < ES_32 || es > ES_64) {
2261 gen_program_exception(s, PGM_SPECIFICATION);
2262 return DISAS_NORETURN;
2265 sumh = tcg_const_i64(0);
2266 suml = tcg_temp_new_i64();
2267 zero = tcg_const_i64(0);
2268 tmpl = tcg_temp_new_i64();
2270 read_vec_element_i64(suml, get_field(s, v3), max_idx, es);
2271 for (idx = 0; idx <= max_idx; idx++) {
2272 read_vec_element_i64(tmpl, get_field(s, v2), idx, es);
2273 tcg_gen_add2_i64(suml, sumh, suml, sumh, tmpl, zero);
2275 write_vec_element_i64(sumh, get_field(s, v1), 0, ES_64);
2276 write_vec_element_i64(suml, get_field(s, v1), 1, ES_64);
2278 tcg_temp_free_i64(sumh);
2279 tcg_temp_free_i64(suml);
2280 tcg_temp_free_i64(zero);
2281 tcg_temp_free_i64(tmpl);
2285 static DisasJumpType op_vsum(DisasContext *s, DisasOps *o)
2287 const uint8_t es = get_field(s, m4);
2292 gen_program_exception(s, PGM_SPECIFICATION);
2293 return DISAS_NORETURN;
2296 sum = tcg_temp_new_i32();
2297 tmp = tcg_temp_new_i32();
2298 for (dst_idx = 0; dst_idx < 4; dst_idx++) {
2299 uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 4;
2300 const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 4 - 1;
2302 read_vec_element_i32(sum, get_field(s, v3), max_idx, es);
2303 for (; idx <= max_idx; idx++) {
2304 read_vec_element_i32(tmp, get_field(s, v2), idx, es);
2305 tcg_gen_add_i32(sum, sum, tmp);
2307 write_vec_element_i32(sum, get_field(s, v1), dst_idx, ES_32);
2309 tcg_temp_free_i32(sum);
2310 tcg_temp_free_i32(tmp);
2314 static DisasJumpType op_vtm(DisasContext *s, DisasOps *o)
2316 gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
2317 cpu_env, 0, gen_helper_gvec_vtm);
static DisasJumpType op_vfae(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    static gen_helper_gvec_3 * const g[3] = {
        gen_helper_gvec_vfae8,
        gen_helper_gvec_vfae16,
        gen_helper_gvec_vfae32,
    };
    static gen_helper_gvec_3_ptr * const g_cc[3] = {
        gen_helper_gvec_vfae_cc8,
        gen_helper_gvec_vfae_cc16,
        gen_helper_gvec_vfae_cc32,
    };

    if (es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), cpu_env, m5, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), m5, g[es]);
    }
    return DISAS_NEXT;
}

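/*
 * VECTOR FIND ELEMENT EQUAL: same dispatch scheme as VFAE; all m5 bits
 * except CC (bit 0) and zero-search (bit 1) are reserved.
 */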
static DisasJumpType op_vfee(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    static gen_helper_gvec_3 * const g[3] = {
        gen_helper_gvec_vfee8,
        gen_helper_gvec_vfee16,
        gen_helper_gvec_vfee32,
    };
    static gen_helper_gvec_3_ptr * const g_cc[3] = {
        gen_helper_gvec_vfee_cc8,
        gen_helper_gvec_vfee_cc16,
        gen_helper_gvec_vfee_cc32,
    };

    if (es > ES_32 || m5 & ~0x3) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), cpu_env, m5, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), m5, g[es]);
    }
    return DISAS_NEXT;
}

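/* VECTOR FIND ELEMENT NOT EQUAL: structurally identical to VFEE. */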
static DisasJumpType op_vfene(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    static gen_helper_gvec_3 * const g[3] = {
        gen_helper_gvec_vfene8,
        gen_helper_gvec_vfene16,
        gen_helper_gvec_vfene32,
    };
    static gen_helper_gvec_3_ptr * const g_cc[3] = {
        gen_helper_gvec_vfene_cc8,
        gen_helper_gvec_vfene_cc16,
        gen_helper_gvec_vfene_cc32,
    };

    if (es > ES_32 || m5 & ~0x3) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), cpu_env, m5, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), m5, g[es]);
    }
    return DISAS_NEXT;
}

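/* VECTOR ISOLATE STRING: only the CC bit (bit 0) of m5 is valid here. */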
static DisasJumpType op_vistr(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    static gen_helper_gvec_2 * const g[3] = {
        gen_helper_gvec_vistr8,
        gen_helper_gvec_vistr16,
        gen_helper_gvec_vistr32,
    };
    static gen_helper_gvec_2_ptr * const g_cc[3] = {
        gen_helper_gvec_vistr_cc8,
        gen_helper_gvec_vistr_cc16,
        gen_helper_gvec_vistr_cc32,
    };

    if (es > ES_32 || m5 & ~0x1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
                       cpu_env, 0, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_2_ool(get_field(s, v1), get_field(s, v2), 0,
                       g[es]);
    }
    return DISAS_NEXT;
}

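/*
 * VECTOR STRING RANGE COMPARE: bit 2 of m6 selects the "result type"
 * (_rt) helper variants, bit 0 the CC-setting ones.
 */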
static DisasJumpType op_vstrc(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m5);
    const uint8_t m6 = get_field(s, m6);
    static gen_helper_gvec_4 * const g[3] = {
        gen_helper_gvec_vstrc8,
        gen_helper_gvec_vstrc16,
        gen_helper_gvec_vstrc32,
    };
    static gen_helper_gvec_4 * const g_rt[3] = {
        gen_helper_gvec_vstrc_rt8,
        gen_helper_gvec_vstrc_rt16,
        gen_helper_gvec_vstrc_rt32,
    };
    static gen_helper_gvec_4_ptr * const g_cc[3] = {
        gen_helper_gvec_vstrc_cc8,
        gen_helper_gvec_vstrc_cc16,
        gen_helper_gvec_vstrc_cc32,
    };
    static gen_helper_gvec_4_ptr * const g_cc_rt[3] = {
        gen_helper_gvec_vstrc_cc_rt8,
        gen_helper_gvec_vstrc_cc_rt16,
        gen_helper_gvec_vstrc_cc_rt32,
    };

    if (es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m6, 0, 1)) {
        if (extract32(m6, 2, 1)) {
            gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
                           get_field(s, v3), get_field(s, v4),
                           cpu_env, m6, g_cc_rt[es]);
        } else {
            gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
                           get_field(s, v3), get_field(s, v4),
                           cpu_env, m6, g_cc[es]);
        }
        set_cc_static(s);
    } else {
        if (extract32(m6, 2, 1)) {
            gen_gvec_4_ool(get_field(s, v1), get_field(s, v2),
                           get_field(s, v3), get_field(s, v4),
                           m6, g_rt[es]);
        } else {
            gen_gvec_4_ool(get_field(s, v1), get_field(s, v2),
                           get_field(s, v3), get_field(s, v4),
                           m6, g[es]);
        }
    }
    return DISAS_NEXT;
}

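/*
 * VECTOR FP ADD/DIVIDE/MULTIPLY/SUBTRACT (VFA/VFD/VFM/VFS), distinguished
 * via op2. The long format is always available; short and extended require
 * the vector enhancements facility.
 */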
static DisasJumpType op_vfa(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    gen_helper_gvec_3_ptr *fn = NULL;

    switch (s->fields.op2) {
    case 0xe3: /* VFA */
        if (fpf == FPF_LONG) {
            fn = gen_helper_gvec_vfa64;
        } else if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
            fn = fpf == FPF_SHORT ? gen_helper_gvec_vfa32 :
                 fpf == FPF_EXT ? gen_helper_gvec_vfa128 : NULL;
        }
        break;
    case 0xe5: /* VFD */
        if (fpf == FPF_LONG) {
            fn = gen_helper_gvec_vfd64;
        } else if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
            fn = fpf == FPF_SHORT ? gen_helper_gvec_vfd32 :
                 fpf == FPF_EXT ? gen_helper_gvec_vfd128 : NULL;
        }
        break;
    case 0xe7: /* VFM */
        if (fpf == FPF_LONG) {
            fn = gen_helper_gvec_vfm64;
        } else if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
            fn = fpf == FPF_SHORT ? gen_helper_gvec_vfm32 :
                 fpf == FPF_EXT ? gen_helper_gvec_vfm128 : NULL;
        }
        break;
    case 0xe2: /* VFS */
        if (fpf == FPF_LONG) {
            fn = gen_helper_gvec_vfs64;
        } else if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
            fn = fpf == FPF_SHORT ? gen_helper_gvec_vfs32 :
                 fpf == FPF_EXT ? gen_helper_gvec_vfs128 : NULL;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (!fn || extract32(m5, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                   get_field(s, v3), cpu_env, m5, fn);
    return DISAS_NEXT;
}

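/*
 * VECTOR FP COMPARE SCALAR (WFC, op2 0xcb) and VECTOR FP COMPARE AND
 * SIGNAL SCALAR (WFK): compare the leftmost double elements and set the CC.
 */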
static DisasJumpType op_wfc(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);

    if (fpf != FPF_LONG || m4) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (s->fields.op2 == 0xcb) {
        gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
                       cpu_env, 0, gen_helper_gvec_wfc64);
    } else {
        gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
                       cpu_env, 0, gen_helper_gvec_wfk64);
    }
    set_cc_static(s);
    return DISAS_NEXT;
}

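/*
 * VFCE/VFCH/VFCHE (compare equal/high/high-or-equal): the _cc helper
 * variants additionally set the CC when the CS bit (bit 0) of m6 is set.
 */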
static DisasJumpType op_vfc(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    const uint8_t m6 = get_field(s, m6);
    const bool cs = extract32(m6, 0, 1);
    const bool sq = extract32(m5, 2, 1);
    gen_helper_gvec_3_ptr *fn = NULL;

    switch (s->fields.op2) {
    case 0xe8: /* VFCE */
        if (fpf == FPF_SHORT) {
            fn = cs ? gen_helper_gvec_vfce32_cc : gen_helper_gvec_vfce32;
        } else if (fpf == FPF_LONG) {
            fn = cs ? gen_helper_gvec_vfce64_cc : gen_helper_gvec_vfce64;
        } else if (fpf == FPF_EXT) {
            fn = cs ? gen_helper_gvec_vfce128_cc : gen_helper_gvec_vfce128;
        }
        break;
    case 0xeb: /* VFCH */
        if (fpf == FPF_SHORT) {
            fn = cs ? gen_helper_gvec_vfch32_cc : gen_helper_gvec_vfch32;
        } else if (fpf == FPF_LONG) {
            fn = cs ? gen_helper_gvec_vfch64_cc : gen_helper_gvec_vfch64;
        } else if (fpf == FPF_EXT) {
            fn = cs ? gen_helper_gvec_vfch128_cc : gen_helper_gvec_vfch128;
        }
        break;
    case 0xea: /* VFCHE */
        if (fpf == FPF_SHORT) {
            fn = cs ? gen_helper_gvec_vfche32_cc : gen_helper_gvec_vfche32;
        } else if (fpf == FPF_LONG) {
            fn = cs ? gen_helper_gvec_vfche64_cc : gen_helper_gvec_vfche64;
        } else if (fpf == FPF_EXT) {
            fn = cs ? gen_helper_gvec_vfche128_cc : gen_helper_gvec_vfche128;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (!fn || extract32(m5, 0, 2) || extract32(m6, 1, 3) ||
        (!s390_has_feat(S390_FEAT_VECTOR_ENH) && (fpf != FPF_LONG || sq))) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), get_field(s, v3),
                   cpu_env, m5, fn);
    if (cs) {
        set_cc_static(s);
    }
    return DISAS_NEXT;
}

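/*
 * FP <-> integer conversions (VCDG/VCDLG/VCGD/VCLGD) plus VFI and VFLR,
 * which all share the same rounding-mode handling: m4 and erm are packed
 * into the helper's simd_data. erm values 2 and > 7 are invalid.
 */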
static DisasJumpType op_vcdg(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);
    const uint8_t erm = get_field(s, m5);
    gen_helper_gvec_2_ptr *fn = NULL;

    switch (s->fields.op2) {
    case 0xc3: /* VCDG */
        if (fpf == FPF_LONG) {
            fn = gen_helper_gvec_vcdg64;
        }
        break;
    case 0xc1: /* VCDLG */
        if (fpf == FPF_LONG) {
            fn = gen_helper_gvec_vcdlg64;
        }
        break;
    case 0xc2: /* VCGD */
        if (fpf == FPF_LONG) {
            fn = gen_helper_gvec_vcgd64;
        }
        break;
    case 0xc0: /* VCLGD */
        if (fpf == FPF_LONG) {
            fn = gen_helper_gvec_vclgd64;
        }
        break;
    case 0xc7: /* VFI */
        if (fpf == FPF_LONG) {
            fn = gen_helper_gvec_vfi64;
        } else if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
            fn = fpf == FPF_SHORT ? gen_helper_gvec_vfi32 :
                 fpf == FPF_EXT ? gen_helper_gvec_vfi128 : NULL;
        }
        break;
    case 0xc5: /* VFLR */
        if (fpf == FPF_LONG) {
            fn = gen_helper_gvec_vflr64;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (!fn || extract32(m4, 0, 2) || erm > 7 || erm == 2) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env,
                   deposit32(m4, 4, 4, erm), fn);
    return DISAS_NEXT;
}

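/*
 * VECTOR FP LOAD LENGTHENED: extend each short (32-bit) element of v2
 * into a long (64-bit) element of v1.
 */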
static DisasJumpType op_vfll(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);

    if (fpf != FPF_SHORT || extract32(m4, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env,
                   m4, gen_helper_gvec_vfll32);
    return DISAS_NEXT;
}

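/*
 * VECTOR FP MULTIPLY AND ADD (op2 0x8f) / MULTIPLY AND SUBTRACT: only the
 * long format is supported.
 */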
static DisasJumpType op_vfma(DisasContext *s, DisasOps *o)
{
    const uint8_t m5 = get_field(s, m5);
    const uint8_t fpf = get_field(s, m6);
    gen_helper_gvec_4_ptr *fn;

    if (fpf != FPF_LONG || extract32(m5, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (s->fields.op2 == 0x8f) {
        fn = gen_helper_gvec_vfma64;
    } else {
        fn = gen_helper_gvec_vfms64;
    }
    gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
                   get_field(s, v3), get_field(s, v4), cpu_env, m5, fn);
    return DISAS_NEXT;
}

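/*
 * VECTOR FP PERFORM SIGN OPERATION: pure sign-bit manipulation, so no FP
 * helper is needed. m5 selects complement/negative/positive; bit 3 of m4
 * restricts the operation to the leftmost element.
 */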
static DisasJumpType op_vfpso(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v2 = get_field(s, v2);
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    TCGv_i64 tmp;

    if (fpf != FPF_LONG || extract32(m4, 0, 3) || m5 > 2) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m4, 3, 1)) {
        tmp = tcg_temp_new_i64();
        read_vec_element_i64(tmp, v2, 0, ES_64);
        switch (m5) {
        case 0:
            /* sign bit is inverted (complement) */
            tcg_gen_xori_i64(tmp, tmp, 1ull << 63);
            break;
        case 1:
            /* sign bit is set to one (negative) */
            tcg_gen_ori_i64(tmp, tmp, 1ull << 63);
            break;
        case 2:
            /* sign bit is set to zero (positive) */
            tcg_gen_andi_i64(tmp, tmp, (1ull << 63) - 1);
            break;
        }
        write_vec_element_i64(tmp, v1, 0, ES_64);
        tcg_temp_free_i64(tmp);
    } else {
        switch (m5) {
        case 0:
            /* sign bit is inverted (complement) */
            gen_gvec_fn_2i(xori, ES_64, v1, v2, 1ull << 63);
            break;
        case 1:
            /* sign bit is set to one (negative) */
            gen_gvec_fn_2i(ori, ES_64, v1, v2, 1ull << 63);
            break;
        case 2:
            /* sign bit is set to zero (positive) */
            gen_gvec_fn_2i(andi, ES_64, v1, v2, (1ull << 63) - 1);
            break;
        }
    }
    return DISAS_NEXT;
}

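/*
 * VECTOR FP SQUARE ROOT: long always; short and extended only with the
 * vector enhancements facility.
 */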
static DisasJumpType op_vfsq(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);
    gen_helper_gvec_2_ptr *fn = NULL;

    switch (fpf) {
    case FPF_SHORT:
        if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
            fn = gen_helper_gvec_vfsq32;
        }
        break;
    case FPF_LONG:
        fn = gen_helper_gvec_vfsq64;
        break;
    case FPF_EXT:
        if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
            fn = gen_helper_gvec_vfsq128;
        }
        break;
    default:
        break;
    }

    if (!fn || extract32(m4, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, m4, fn);
    return DISAS_NEXT;
}

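/*
 * VECTOR FP TEST DATA CLASS IMMEDIATE: the 12-bit class mask i3 is passed
 * via simd_data alongside m5; the CC is set by the helper.
 */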
static DisasJumpType op_vftci(DisasContext *s, DisasOps *o)
{
    const uint16_t i3 = get_field(s, i3);
    const uint8_t fpf = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);

    if (fpf != FPF_LONG || extract32(m5, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env,
                   deposit32(m5, 4, 12, i3), gen_helper_gvec_vftci64);
    set_cc_static(s);
    return DISAS_NEXT;
}