 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
#include "qemu/osdep.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#include "exec/translator.h"
#include "qemu/atomic128.h"
/* Information that (almost) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;
    DisasContextBase base;
    const DisasInsn *insn;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     */

/* Information carried about a condition to be evaluated. */
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
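
/*
 * In other words: with FLAG_MASK_64 set, the link info is simply the
 * 64-bit PC.  Otherwise only bits 0-31 of OUT are replaced (deposit at
 * offset 0, length 32), preserving the caller's high word, and in 31-bit
 * mode the address was first tagged with the high bit (pc |= 0x80000000).
 */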
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];
void s390x_translate_init(void)
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
static inline int vec_full_reg_offset(uint8_t reg)
    return offsetof(CPUS390XState, vregs[reg][0]);

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 bytes and vregs[n][1] the highest 8 bytes
     * of the 16-byte vector, on both little- and big-endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     *
     * For 16-byte elements, the two 8-byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8-byte elements have to be loaded separately. Let's force all
     * 16-byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
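
/*
 * Worked example for the offset math above: enr = 3 with es = MO_16
 * (bytes = 2) gives offs = 6.  On a big-endian host that is already the
 * byte offset of halfword 3; on a little-endian host the XOR with
 * (8 - bytes) flips it within its 8-byte half, yielding 0, matching the
 * little-endian HW row of the table above.
 */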
static inline int freg64_offset(uint8_t reg)
    return vec_reg_offset(reg, 0, MO_64);

static inline int freg32_offset(uint8_t reg)
    return vec_reg_offset(reg, 0, MO_32);

static TCGv_i64 load_reg(int reg)
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;

static TCGv_i64 load_freg(int reg)
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;

static TCGv_i64 load_freg32_i64(int reg)
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;

static void store_reg(int reg, TCGv_i64 v)
    tcg_gen_mov_i64(regs[reg], v);

static void store_freg(int reg, TCGv_i64 v)
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));

static void store_reg32_i64(int reg, TCGv_i64 v)
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);

static void store_reg32h_i64(int reg, TCGv_i64 v)
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);

static void store_freg32_i64(int reg, TCGv_i64 v)
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));

static void return_low128(TCGv_i64 dest)
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));

static void update_psw_addr(DisasContext *s)
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);

static void per_branch(DisasContext *s, bool to_next)
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif

static void per_breaking_event(DisasContext *s)
    tcg_gen_movi_i64(gbea, s->base.pc_next);

static void update_cc_op(DisasContext *s)
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
    return (uint64_t)cpu_lduw_code(env, pc);

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);

static int get_mem_index(DisasContext *s)
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    }
#endif

static void gen_exception(int excp)
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);

static void gen_program_exception(DisasContext *s, int code)
    TCGv_i32 tmp;

    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Trigger exception. */
    gen_exception(EXCP_PGM);

static inline void gen_illegal_opcode(DisasContext *s)
    gen_program_exception(s, PGM_OPERATION);

static inline void gen_data_exception(uint8_t dxc)
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);

static inline void gen_trap(DisasContext *s)
    /* Set DXC to 0xff */
    gen_data_exception(0xff);

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
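
/*
 * This reproduces S/390 address-arithmetic wrap-around: in 24-bit mode,
 * e.g. base 0x00fffffe plus displacement 4 yields 0x00000002 (the AND
 * with 0x00ffffff drops the carry out of bit 23), and in 31-bit mode the
 * AND with 0x7fffffff likewise discards bit 31.
 */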
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed. If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;

static inline bool live_cc_data(DisasContext *s)
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
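
/*
 * These update helpers implement the lazy CC scheme: rather than compute
 * the condition code after every instruction, we record the operation in
 * s->cc_op and park its operands in cc_src/cc_dst/cc_vr.  A 64-bit signed
 * compare, for instance, just leaves CC_OP_LTGT_64 with both operands
 * saved; the CC is only materialized by gen_op_calc_cc() when needed.
 */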
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

        dummy = tcg_const_i64(0);

        local_cc_op = tcg_const_i32(s->cc_op);

        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;

    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;

        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;

        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;

    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */

static bool use_exit_tb(DisasContext *s)
    return s->base.singlestep_enabled ||
            (tb_cflags(s->base.tb) & CF_LAST_IO) ||
            (s->base.tb->flags & FLAG_MASK_PER);

static bool use_goto_tb(DisasContext *s, uint64_t dest)
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif

static void account_noninline_branch(DisasContext *s, int cc_op)
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif

static void account_inline_branch(DisasContext *s, int cc_op)
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
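
/*
 * Both tables are indexed by the 4-bit branch mask, whose bits select
 * CC values 0..3 from left to right (8 -> CC0, 4 -> CC1, 2 -> CC2,
 * 1 -> CC3).  For example, after a signed compare, mask 8 ("branch if
 * equal") yields ltgt_cond[8] = TCG_COND_EQ, while mask 4 | 2 ("less or
 * greater") yields ltgt_cond[6] = TCG_COND_NE.
 */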
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

        account_inline_branch(s, old_cc_op);
        break;

        account_inline_branch(s, old_cc_op);
        break;

        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

        /* Note that CC=0 is impossible; treat it as don't-care. */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC. We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->g1 = c->g2 = false;

        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;

    case CC_OP_LTUGTU_32:
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

        c->u.s64.b = tcg_const_i64(0);
        break;

    case CC_OP_LTUGTU_64:
        c->g1 = c->g2 = true;
        break;

        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        }

        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
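            /*
             * Example: for mask = 8 | 1 (branch on CC0 or CC3), which no
             * special case above handles, we test (8 >> cc) & 9 != 0;
             * this is nonzero exactly for cc = 0 (8 & 9) and cc = 3
             * (1 & 9).
             */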
static void free_compare(DisasCompare *c)
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }

/* ====================================================================== */
/* Define the insn format enumeration. */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

/* Define a structure to hold the decoded fields. We'll store each inside
   an array indexed by an enum. In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact. For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps. */

enum DisasFieldIndexO {

enum DisasFieldIndexC {

struct DisasFields {
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

/* This is the way fields are to be accessed out of DisasFields. */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
    return (f->presentO >> c) & 1;

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
    assert(have_field1(f, o));
    return f->c[c];

/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
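
/*
 * Reading one of these entries: BXD(2) describes a classic
 * base-index-displacement operand, i.e. a 4-bit b2 field at bit 16, a
 * 4-bit x2 field at bit 12 and a 12-bit d2 field at bit 20 of the
 * instruction, while BXDL swaps in the 20-bit long displacement
 * (field type 2) used by the long-displacement formats.
 */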
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back. See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated. To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0. To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>. */
#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB. */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB. */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb. No fixup required. */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values. */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed. */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop. */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4

/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */

    /* Pre-process arguments before HELP_OP. */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself. */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations. */

static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
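
/*
 * Example: called with mask = 63 for a 64-bit shift, this yields the
 * architectural shift count (b2 ? address(b2, d2) : d2) & 63 in o->in2;
 * only the low six bits of the effective address are significant.
 */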
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
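
/*
 * The two exits above are the two TB-chaining strategies: the
 * goto_tb/exit_tb(tb, 0) path lets this TB be patched to jump directly
 * to the destination TB whenever use_goto_tb() allows it (same page, no
 * PER, not single-stepping); the movi + DISAS_PC_UPDATED path falls back
 * to looking the destination up in the main loop.
 */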
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next. */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch. */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken. */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond. This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb. Just update the PC and exit. */
        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }
        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }
        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);
        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
/* ====================================================================== */
/* The operations. These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized. */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
    tcg_gen_abs_i64(o->out, o->in2);

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
    tcg_gen_add_i64(o->out, o->in1, o->in2);

static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3. Feeding the generated comparison to
       setcond produces the carry flag that we desire. */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);

static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
    tcg_gen_and_i64(o->out, o->in1, o->in2);

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
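
/*
 * s->insn->data packs (size << 8) | shift.  For example, data =
 * (16 << 8) | 48 selects the most significant halfword: mask becomes
 * 0xffff << 48, the immediate is shifted into place and all other bits
 * are forced to 1 before the AND, so the untouched parts of in1 pass
 * through unchanged.
 */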
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }

static void save_link_info(DisasContext *s, DisasOps *o)
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
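
/*
 * The 24-bit-mode link word assembled above follows the BAL layout:
 * ILC (s->ilen / 2) in bits 30-31 of the low word, the condition code
 * in bits 28-29, the program mask (extracted from psw_mask) in bits
 * 24-27, and the 24-bit return address from s->pc_tmp below that.
 */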
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s->fields, i2));

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);

static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);

static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
    } else {
        c.u.s64.b = regs[r3 | 1];
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];

    return help_branch(s, &c, is_imm, imm, o->in2);

static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);

static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);

static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s->fields, m3);
    uint8_t m4 = get_field(s->fields, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
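
/*
 * The returned constant packs both modifier fields into one temp:
 * deposit32(m3, 4, 4, m4) == (m4 << 4) | m3, so a helper can unpack the
 * rounding mode (m3) from bits 0-3 and the m4 modifier (e.g. the
 * inexact-suppression control) from bits 4-7.
 */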
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);

static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);

static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);

static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);

static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);

static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);

static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);

static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);

static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value). */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal? Note that this setcond
       produces the output CC value, thus the NE sense of the test. */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;

static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value). */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal? */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps. */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus). */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);
#endif
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);

static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap. */
    gen_trap(s);

    gen_set_label(lab);

static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
    int m3 = get_field(s->fields, m3);
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even. */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);

#ifndef CONFIG_USER_ONLY
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
#endif

static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);

static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);

static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);

static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);

static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);

static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);

static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
    int r2 = get_field(s->fields, r2);

    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));

static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
    /* No cache information provided. */
    tcg_gen_movi_i64(o->out, -1);

static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));

static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2. Thus we cannot defer these writes to an output hook. */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }
    tcg_temp_free_i64(t);

static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed. */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return DISAS_PC_CC_UPDATED;

static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);

static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);

static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
    return_low128(o->out2);
    tcg_temp_free_i32(m34);

static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64. It also lets cc_dst be a convenient
       temporary during our computation. */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64. */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
       value by 64, which is undefined. But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing. */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
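
/*
 * Example: FLOGR on 0x0000400000000000 sets R1 = 17 (the count of
 * leading zeros) and R1+1 = input & ~(0x8000000000000000 >> 17) = 0,
 * i.e. the input with the found bit cleared.  For a zero input the clz
 * result is 64 and the shift is the undefined case noted above, but the
 * final AND with the (zero) original value still produces 0.
 */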
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
    int m3 = get_field(s->fields, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load. */
        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load. */
        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load. */
        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    default:
        /* This is going to be a sequence of loads and inserts. */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    tcg_temp_free_i64(tmp);
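
/*
 * Example of the single-insert fast path: ICM with m3 = 0xc (insert
 * into the two leftmost bytes) is one 16-bit load deposited at
 * pos = base + ctz32(0xc) * 8 = base + 16 with len = 16, and
 * ccm = 0xffff << pos ensures only the inserted bytes participate in
 * the CC computation.
 */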
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;

    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);

static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
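
/*
 * IPM thus rebuilds the same byte that save_link_info() stores for BAL:
 * t1 becomes (cc << 4) | program_mask, and the deposit at offset 24
 * places the CC in bits 28-29 and the program mask in bits 24-27 of R1,
 * leaving the rest of the register intact.
 */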
2666 #ifndef CONFIG_USER_ONLY
2667 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2671 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2672 m4 = tcg_const_i32(get_field(s->fields, m4));
2674 m4 = tcg_const_i32(0);
2676 gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2677 tcg_temp_free_i32(m4);
2681 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2685 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2686 m4 = tcg_const_i32(get_field(s->fields, m4));
2688 m4 = tcg_const_i32(0);
2690 gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2691 tcg_temp_free_i32(m4);
2695 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2697 gen_helper_iske(o->out, cpu_env, o->in2);
2702 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2704 int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
2705 int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
2706 int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
2707 TCGv_i32 t_r1, t_r2, t_r3, type;
2709 switch (s->insn->data) {
2710 case S390_FEAT_TYPE_KMCTR:
2711 if (r3 & 1 || !r3) {
2712 gen_program_exception(s, PGM_SPECIFICATION);
2713 return DISAS_NORETURN;
2716 case S390_FEAT_TYPE_PPNO:
2717 case S390_FEAT_TYPE_KMF:
2718 case S390_FEAT_TYPE_KMC:
2719 case S390_FEAT_TYPE_KMO:
2720 case S390_FEAT_TYPE_KM:
2721 if (r1 & 1 || !r1) {
2722 gen_program_exception(s, PGM_SPECIFICATION);
2723 return DISAS_NORETURN;
2726 case S390_FEAT_TYPE_KMAC:
2727 case S390_FEAT_TYPE_KIMD:
2728 case S390_FEAT_TYPE_KLMD:
2729 if (r2 & 1 || !r2) {
2730 gen_program_exception(s, PGM_SPECIFICATION);
2731 return DISAS_NORETURN;
2734 case S390_FEAT_TYPE_PCKMO:
2735 case S390_FEAT_TYPE_PCC:
2738 g_assert_not_reached();
2741 t_r1 = tcg_const_i32(r1);
2742 t_r2 = tcg_const_i32(r2);
2743 t_r3 = tcg_const_i32(r3);
2744 type = tcg_const_i32(s->insn->data);
2745 gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2747 tcg_temp_free_i32(t_r1);
2748 tcg_temp_free_i32(t_r2);
2749 tcg_temp_free_i32(t_r3);
2750 tcg_temp_free_i32(type);
2754 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2756 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2761 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2763 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2768 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2770 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2775 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2777 /* The real output is indeed the original value in memory,
2778 fetched atomically below. */
2779 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2780 s->insn->data | MO_ALIGN);
2781 /* However, we need to recompute the addition for setting CC. */
2782 tcg_gen_add_i64(o->out, o->in1, o->in2);
2786 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2788 /* The real output is indeed the original value in memory,
2789 fetched atomically below. */
2790 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2791 s->insn->data | MO_ALIGN);
2792 /* However, we need to recompute the operation for setting CC. */
2793 tcg_gen_and_i64(o->out, o->in1, o->in2);
2797 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2800 /* The real output is indeed the original value in memory,
2801 fetched atomically below. */
2801 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2802 s->insn->data | MO_ALIGN);
2803 /* However, we need to recompute the operation for setting CC. */
2804 tcg_gen_or_i64(o->out, o->in1, o->in2);
2808 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2810 /* The real output is indeed the original value in memory,
2811 fetched atomically below. */
2812 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2813 s->insn->data | MO_ALIGN);
2814 /* However, we need to recompute the operation for setting CC. */
2815 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2819 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2821 gen_helper_ldeb(o->out, cpu_env, o->in2);
2825 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2827 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2830 return DISAS_NORETURN;
2832 gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2833 tcg_temp_free_i32(m34);
2837 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2839 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2842 return DISAS_NORETURN;
2844 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
2845 tcg_temp_free_i32(m34);
2849 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2851 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2854 return DISAS_NORETURN;
2856 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
2857 tcg_temp_free_i32(m34);
2861 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2863 gen_helper_lxdb(o->out, cpu_env, o->in2);
2864 return_low128(o->out2);
2868 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2870 gen_helper_lxeb(o->out, cpu_env, o->in2);
2871 return_low128(o->out2);
2875 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2877 tcg_gen_shli_i64(o->out, o->in2, 32);
2881 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2883 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2887 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2889 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2893 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2895 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2899 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2901 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2905 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2907 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2911 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2913 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2917 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2919 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2923 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2925 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2929 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2931 TCGLabel *lab = gen_new_label();
2932 store_reg32_i64(get_field(s->fields, r1), o->in2);
2933 /* The value is stored even in case of trap. */
2934 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2940 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2942 TCGLabel *lab = gen_new_label();
2943 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2944 /* The value is stored even in case of trap. */
2945 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2951 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2953 TCGLabel *lab = gen_new_label();
2954 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2955 /* The value is stored even in case of trap. */
2956 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2962 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2964 TCGLabel *lab = gen_new_label();
2965 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2966 /* The value is stored even in case of trap. */
2967 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2973 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2975 TCGLabel *lab = gen_new_label();
2976 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2977 /* The value is stored even in case of trap. */
2978 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
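/* LOAD ON CONDITION: rather than branching, turn the M3 mask into a
   TCG comparison via disas_jcc and select the result with movcond. */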
2984 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2988 disas_jcc(s, &c, get_field(s->fields, m3));
2991 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2995 TCGv_i32 t32 = tcg_temp_new_i32();
2998 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
3001 t = tcg_temp_new_i64();
3002 tcg_gen_extu_i32_i64(t, t32);
3003 tcg_temp_free_i32(t32);
3005 z = tcg_const_i64(0);
3006 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
3007 tcg_temp_free_i64(t);
3008 tcg_temp_free_i64(z);
3014 #ifndef CONFIG_USER_ONLY
3015 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
3017 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3018 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3019 gen_helper_lctl(cpu_env, r1, o->in2, r3);
3020 tcg_temp_free_i32(r1);
3021 tcg_temp_free_i32(r3);
3022 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3023 return DISAS_PC_STALE_NOCHAIN;
3026 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3028 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3029 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3030 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3031 tcg_temp_free_i32(r1);
3032 tcg_temp_free_i32(r3);
3033 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3034 return DISAS_PC_STALE_NOCHAIN;
3037 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3039 gen_helper_lra(o->out, cpu_env, o->in2);
3044 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3046 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3050 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3054 per_breaking_event(s);
3056 t1 = tcg_temp_new_i64();
3057 t2 = tcg_temp_new_i64();
3058 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3059 MO_TEUL | MO_ALIGN_8);
3060 tcg_gen_addi_i64(o->in2, o->in2, 4);
3061 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3062 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
3063 tcg_gen_shli_i64(t1, t1, 32);
3064 gen_helper_load_psw(cpu_env, t1, t2);
3065 tcg_temp_free_i64(t1);
3066 tcg_temp_free_i64(t2);
3067 return DISAS_NORETURN;
3070 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3074 per_breaking_event(s);
3076 t1 = tcg_temp_new_i64();
3077 t2 = tcg_temp_new_i64();
3078 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3079 MO_TEQ | MO_ALIGN_8);
3080 tcg_gen_addi_i64(o->in2, o->in2, 8);
3081 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3082 gen_helper_load_psw(cpu_env, t1, t2);
3083 tcg_temp_free_i64(t1);
3084 tcg_temp_free_i64(t2);
3085 return DISAS_NORETURN;
3089 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3091 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3092 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3093 gen_helper_lam(cpu_env, r1, o->in2, r3);
3094 tcg_temp_free_i32(r1);
3095 tcg_temp_free_i32(r3);
3099 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3101 int r1 = get_field(s->fields, r1);
3102 int r3 = get_field(s->fields, r3);
3105 /* Only one register to read. */
3106 t1 = tcg_temp_new_i64();
3107 if (unlikely(r1 == r3)) {
3108 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3109 store_reg32_i64(r1, t1);
3114 /* First load the values of the first and last registers to trigger
3115 possible page faults. */
3116 t2 = tcg_temp_new_i64();
3117 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
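/* The (r3 - r1) & 15 distance handles register wraparound,
   e.g. LM %r14,%r1 loads r14, r15, r0 and r1. */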
3118 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3119 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3120 store_reg32_i64(r1, t1);
3121 store_reg32_i64(r3, t2);
3123 /* Only two registers to read. */
3124 if (((r1 + 1) & 15) == r3) {
3130 /* Then load the remaining registers. Page fault can't occur. */
3132 tcg_gen_movi_i64(t2, 4);
3135 tcg_gen_add_i64(o->in2, o->in2, t2);
3136 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3137 store_reg32_i64(r1, t1);
3145 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3147 int r1 = get_field(s->fields, r1);
3148 int r3 = get_field(s->fields, r3);
3151 /* Only one register to read. */
3152 t1 = tcg_temp_new_i64();
3153 if (unlikely(r1 == r3)) {
3154 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3155 store_reg32h_i64(r1, t1);
3160 /* First load the values of the first and last registers to trigger
3161 possible page faults. */
3162 t2 = tcg_temp_new_i64();
3163 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3164 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3165 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3166 store_reg32h_i64(r1, t1);
3167 store_reg32h_i64(r3, t2);
3169 /* Only two registers to read. */
3170 if (((r1 + 1) & 15) == r3) {
3176 /* Then load the remaining registers. Page fault can't occur. */
3178 tcg_gen_movi_i64(t2, 4);
3181 tcg_gen_add_i64(o->in2, o->in2, t2);
3182 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3183 store_reg32h_i64(r1, t1);
3191 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3193 int r1 = get_field(s->fields, r1);
3194 int r3 = get_field(s->fields, r3);
3197 /* Only one register to read. */
3198 if (unlikely(r1 == r3)) {
3199 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3203 /* First load the values of the first and last registers to trigger
3204 possible page faults. */
3205 t1 = tcg_temp_new_i64();
3206 t2 = tcg_temp_new_i64();
3207 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3208 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3209 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3210 tcg_gen_mov_i64(regs[r1], t1);
3213 /* Only two registers to read. */
3214 if (((r1 + 1) & 15) == r3) {
3219 /* Then load the remaining registers. Page fault can't occur. */
3221 tcg_gen_movi_i64(t1, 8);
3224 tcg_gen_add_i64(o->in2, o->in2, t1);
3225 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3232 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3235 MemOp mop = s->insn->data;
3237 /* In a parallel context, stop the world and single step. */
3238 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3241 gen_exception(EXCP_ATOMIC);
3242 return DISAS_NORETURN;
3245 /* In a serial context, perform the two loads ... */
3246 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
3247 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3248 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3249 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3250 tcg_temp_free_i64(a1);
3251 tcg_temp_free_i64(a2);
3253 /* ... and indicate that we performed them while interlocked. */
3254 gen_op_movi_cc(s, 0);
3258 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3260 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3261 gen_helper_lpq(o->out, cpu_env, o->in2);
3262 } else if (HAVE_ATOMIC128) {
3263 gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3265 gen_helper_exit_atomic(cpu_env);
3266 return DISAS_NORETURN;
3268 return_low128(o->out2);
3272 #ifndef CONFIG_USER_ONLY
3273 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3275 o->addr1 = get_address(s, 0, get_field(s->fields, r2), 0);
3276 tcg_gen_qemu_ld_tl(o->out, o->addr1, MMU_REAL_IDX, s->insn->data);
3281 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3283 tcg_gen_andi_i64(o->out, o->in2, -256);
3287 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3289 const int64_t block_size = (1ull << (get_field(s->fields, m3) + 6));
3291 if (get_field(s->fields, m3) > 6) {
3292 gen_program_exception(s, PGM_SPECIFICATION);
3293 return DISAS_NORETURN;
3296 tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3297 tcg_gen_neg_i64(o->addr1, o->addr1);
3298 tcg_gen_movi_i64(o->out, 16);
3299 tcg_gen_umin_i64(o->out, o->out, o->addr1);
3300 gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3304 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3307 o->g_out = o->g_in2;
3313 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3315 int b2 = get_field(s->fields, b2);
3316 TCGv ar1 = tcg_temp_new_i64();
3319 o->g_out = o->g_in2;
3323 switch (s->base.tb->flags & FLAG_MASK_ASC) {
3324 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3325 tcg_gen_movi_i64(ar1, 0);
3327 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3328 tcg_gen_movi_i64(ar1, 1);
3330 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3332 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3334 tcg_gen_movi_i64(ar1, 0);
3337 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3338 tcg_gen_movi_i64(ar1, 2);
3342 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3343 tcg_temp_free_i64(ar1);
3348 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3352 o->g_out = o->g_in1;
3353 o->g_out2 = o->g_in2;
3356 o->g_in1 = o->g_in2 = false;
3360 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3362 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3363 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3364 tcg_temp_free_i32(l);
3368 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3370 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3371 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3372 tcg_temp_free_i32(l);
3376 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3378 int r1 = get_field(s->fields, r1);
3379 int r2 = get_field(s->fields, r2);
3382 /* r1 and r2 must be even. */
3383 if (r1 & 1 || r2 & 1) {
3384 gen_program_exception(s, PGM_SPECIFICATION);
3385 return DISAS_NORETURN;
3388 t1 = tcg_const_i32(r1);
3389 t2 = tcg_const_i32(r2);
3390 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3391 tcg_temp_free_i32(t1);
3392 tcg_temp_free_i32(t2);
3397 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3399 int r1 = get_field(s->fields, r1);
3400 int r3 = get_field(s->fields, r3);
3403 /* r1 and r3 must be even. */
3404 if (r1 & 1 || r3 & 1) {
3405 gen_program_exception(s, PGM_SPECIFICATION);
3406 return DISAS_NORETURN;
3409 t1 = tcg_const_i32(r1);
3410 t3 = tcg_const_i32(r3);
3411 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3412 tcg_temp_free_i32(t1);
3413 tcg_temp_free_i32(t3);
3418 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3420 int r1 = get_field(s->fields, r1);
3421 int r3 = get_field(s->fields, r3);
3424 /* r1 and r3 must be even. */
3425 if (r1 & 1 || r3 & 1) {
3426 gen_program_exception(s, PGM_SPECIFICATION);
3427 return DISAS_NORETURN;
3430 t1 = tcg_const_i32(r1);
3431 t3 = tcg_const_i32(r3);
3432 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3433 tcg_temp_free_i32(t1);
3434 tcg_temp_free_i32(t3);
3439 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3441 int r3 = get_field(s->fields, r3);
3442 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3447 #ifndef CONFIG_USER_ONLY
3448 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3450 int r1 = get_field(s->fields, l1);
3451 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3456 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3458 int r1 = get_field(s->fields, l1);
3459 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3465 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3467 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3468 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3469 tcg_temp_free_i32(l);
3473 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3475 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3476 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3477 tcg_temp_free_i32(l);
3481 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3483 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3488 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3490 TCGv_i32 t1 = tcg_const_i32(get_field(s->fields, r1));
3491 TCGv_i32 t2 = tcg_const_i32(get_field(s->fields, r2));
3493 gen_helper_mvst(cc_op, cpu_env, t1, t2);
3494 tcg_temp_free_i32(t1);
3495 tcg_temp_free_i32(t2);
3500 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3502 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3503 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3504 tcg_temp_free_i32(l);
3508 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3510 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3514 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3516 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3520 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3522 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3526 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3528 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3532 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3534 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3538 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3540 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3541 return_low128(o->out2);
3545 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3547 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3548 return_low128(o->out2);
3552 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3554 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3555 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3556 tcg_temp_free_i64(r3);
3560 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3562 TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3563 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3564 tcg_temp_free_i64(r3);
3568 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3570 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3571 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3572 tcg_temp_free_i64(r3);
3576 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3578 TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3579 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3580 tcg_temp_free_i64(r3);
3584 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3587 z = tcg_const_i64(0);
3588 n = tcg_temp_new_i64();
3589 tcg_gen_neg_i64(n, o->in2);
3590 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3591 tcg_temp_free_i64(n);
3592 tcg_temp_free_i64(z);
3596 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3598 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3602 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3604 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3608 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3610 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3611 tcg_gen_mov_i64(o->out2, o->in2);
3615 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3617 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3618 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3619 tcg_temp_free_i32(l);
3624 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3626 tcg_gen_neg_i64(o->out, o->in2);
3630 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3632 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3636 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3638 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3642 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3644 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3645 tcg_gen_mov_i64(o->out2, o->in2);
3649 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3651 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3652 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3653 tcg_temp_free_i32(l);
3658 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3660 tcg_gen_or_i64(o->out, o->in1, o->in2);
3664 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3666 int shift = s->insn->data & 0xff;
3667 int size = s->insn->data >> 8;
3668 uint64_t mask = ((1ull << size) - 1) << shift;
3671 tcg_gen_shli_i64(o->in2, o->in2, shift);
3672 tcg_gen_or_i64(o->out, o->in1, o->in2);
3674 /* Produce the CC from only the bits manipulated. */
3675 tcg_gen_andi_i64(cc_dst, o->out, mask);
3676 set_cc_nz_u64(s, cc_dst);
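/* OI and friends: with the interlocked-access facility the
   read-modify-write must appear atomic, hence the atomic fetch-or
   below; without the facility a plain load/or/store is sufficient. */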
3680 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3682 o->in1 = tcg_temp_new_i64();
3684 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3685 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3687 /* Perform the atomic operation in memory. */
3688 tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3692 /* Recompute also for atomic case: needed for setting CC. */
3693 tcg_gen_or_i64(o->out, o->in1, o->in2);
3695 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3696 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3701 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3703 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3704 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3705 tcg_temp_free_i32(l);
3709 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3711 int l2 = get_field(s->fields, l2) + 1;
3714 /* The length must not exceed 32 bytes. */
3715 if (l2 > 32) {
3716 gen_program_exception(s, PGM_SPECIFICATION);
3717 return DISAS_NORETURN;
3719 l = tcg_const_i32(l2);
3720 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3721 tcg_temp_free_i32(l);
3725 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3727 int l2 = get_field(s->fields, l2) + 1;
3730 /* The length must be even and must not exceed 64 bytes. */
3731 if ((l2 & 1) || (l2 > 64)) {
3732 gen_program_exception(s, PGM_SPECIFICATION);
3733 return DISAS_NORETURN;
3735 l = tcg_const_i32(l2);
3736 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3737 tcg_temp_free_i32(l);
3741 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3743 gen_helper_popcnt(o->out, o->in2);
3747 #ifndef CONFIG_USER_ONLY
3748 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3750 gen_helper_ptlb(cpu_env);
3755 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3757 int i3 = get_field(s->fields, i3);
3758 int i4 = get_field(s->fields, i4);
3759 int i5 = get_field(s->fields, i5);
3760 int do_zero = i4 & 0x80;
3761 uint64_t mask, imask, pmask;
3764 /* Adjust the arguments for the specific insn. */
3765 switch (s->fields->op2) {
3766 case 0x55: /* risbg */
3767 case 0x59: /* risbgn */
3772 case 0x5d: /* risbhg */
3775 pmask = 0xffffffff00000000ull;
3777 case 0x51: /* risblg */
3780 pmask = 0x00000000ffffffffull;
3783 g_assert_not_reached();
3786 /* MASK is the set of bits to be inserted from R2.
3787 Take care of I3/I4 wraparound. */
3790 mask ^= pmask >> i4 >> 1;
3792 mask |= ~(pmask >> i4 >> 1);
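/* For example, RISBG with I3=48, I4=63 (no wraparound) yields
   mask = 0x000000000000ffff, i.e. bits 48-63 in the PoO's
   big-endian bit numbering. */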
3796 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3797 insns, we need to keep the other half of the register. */
3798 imask = ~mask | ~pmask;
3806 if (s->fields->op2 == 0x5d) {
3810 /* In some cases we can implement this with extract. */
3811 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3812 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3816 /* In some cases we can implement this with deposit. */
3817 if (len > 0 && (imask == 0 || ~mask == imask)) {
3818 /* Note that we rotate the bits to be inserted to the lsb, not to
3819 the position as described in the PoO. */
3820 rot = (rot - pos) & 63;
3825 /* Rotate the input as necessary. */
3826 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3828 /* Insert the selected bits into the output. */
3831 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3833 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3835 } else if (imask == 0) {
3836 tcg_gen_andi_i64(o->out, o->in2, mask);
3838 tcg_gen_andi_i64(o->in2, o->in2, mask);
3839 tcg_gen_andi_i64(o->out, o->out, imask);
3840 tcg_gen_or_i64(o->out, o->out, o->in2);
3845 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3847 int i3 = get_field(s->fields, i3);
3848 int i4 = get_field(s->fields, i4);
3849 int i5 = get_field(s->fields, i5);
3852 /* If this is a test-only form, arrange to discard the result. */
3854 o->out = tcg_temp_new_i64();
3862 /* MASK is the set of bits to be operated on from R2.
3863 Take care of I3/I4 wraparound. */
3866 mask ^= ~0ull >> i4 >> 1;
3868 mask |= ~(~0ull >> i4 >> 1);
3871 /* Rotate the input as necessary. */
3872 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3875 switch (s->fields->op2) {
3876 case 0x54: /* AND */
3877 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3878 tcg_gen_and_i64(o->out, o->out, o->in2);
3881 tcg_gen_andi_i64(o->in2, o->in2, mask);
3882 tcg_gen_or_i64(o->out, o->out, o->in2);
3884 case 0x57: /* XOR */
3885 tcg_gen_andi_i64(o->in2, o->in2, mask);
3886 tcg_gen_xor_i64(o->out, o->out, o->in2);
3893 tcg_gen_andi_i64(cc_dst, o->out, mask);
3894 set_cc_nz_u64(s, cc_dst);
3898 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3900 tcg_gen_bswap16_i64(o->out, o->in2);
3904 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3906 tcg_gen_bswap32_i64(o->out, o->in2);
3910 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3912 tcg_gen_bswap64_i64(o->out, o->in2);
3916 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3918 TCGv_i32 t1 = tcg_temp_new_i32();
3919 TCGv_i32 t2 = tcg_temp_new_i32();
3920 TCGv_i32 to = tcg_temp_new_i32();
3921 tcg_gen_extrl_i64_i32(t1, o->in1);
3922 tcg_gen_extrl_i64_i32(t2, o->in2);
3923 tcg_gen_rotl_i32(to, t1, t2);
3924 tcg_gen_extu_i32_i64(o->out, to);
3925 tcg_temp_free_i32(t1);
3926 tcg_temp_free_i32(t2);
3927 tcg_temp_free_i32(to);
3931 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3933 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3937 #ifndef CONFIG_USER_ONLY
3938 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3940 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3945 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3947 gen_helper_sacf(cpu_env, o->in2);
3948 /* Addressing mode has changed, so end the block. */
3949 return DISAS_PC_STALE;
3953 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3955 int sam = s->insn->data;
3971 /* Bizarre but true, we check the address of the current insn for the
3972 specification exception, not the next to be executed. Thus the PoO
3973 documents that Bad Things Happen two bytes before the end. */
3974 if (s->base.pc_next & ~mask) {
3975 gen_program_exception(s, PGM_SPECIFICATION);
3976 return DISAS_NORETURN;
3980 tsam = tcg_const_i64(sam);
3981 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3982 tcg_temp_free_i64(tsam);
3984 /* Always exit the TB, since we (may have) changed execution mode. */
3985 return DISAS_PC_STALE;
3988 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3990 int r1 = get_field(s->fields, r1);
3991 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3995 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3997 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
4001 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
4003 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
4007 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
4009 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
4010 return_low128(o->out2);
4014 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
4016 gen_helper_sqeb(o->out, cpu_env, o->in2);
4020 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
4022 gen_helper_sqdb(o->out, cpu_env, o->in2);
4026 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
4028 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
4029 return_low128(o->out2);
4033 #ifndef CONFIG_USER_ONLY
4034 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
4036 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
4041 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
4043 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4044 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4045 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4047 tcg_temp_free_i32(r1);
4048 tcg_temp_free_i32(r3);
4053 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
4060 disas_jcc(s, &c, get_field(s->fields, m3));
4062 /* We want to store when the condition is fulfilled, so branch
4063 out when it's not. */
4064 c.cond = tcg_invert_cond(c.cond);
4066 lab = gen_new_label();
4068 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
4070 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
4074 r1 = get_field(s->fields, r1);
4075 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
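/* insn->data selects the store width: 0 for STOCG (64-bit),
   1 for STOC (32-bit), 2 for STOCFH (high half). */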
4076 switch (s->insn->data) {
4078 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
4081 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
4083 case 2: /* STOCFH */
4084 h = tcg_temp_new_i64();
4085 tcg_gen_shri_i64(h, regs[r1], 32);
4086 tcg_gen_qemu_st32(h, a, get_mem_index(s));
4087 tcg_temp_free_i64(h);
4090 g_assert_not_reached();
4092 tcg_temp_free_i64(a);
4098 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
4100 uint64_t sign = 1ull << s->insn->data;
4101 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
4102 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
4103 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4104 /* The arithmetic left shift is curious in that it does not affect
4105 the sign bit. Copy that over from the source unchanged. */
4106 tcg_gen_andi_i64(o->out, o->out, ~sign);
4107 tcg_gen_andi_i64(o->in1, o->in1, sign);
4108 tcg_gen_or_i64(o->out, o->out, o->in1);
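/* E.g. a 32-bit SLA of 0x80000001 by 1 produces 0x80000002: bit 31
   is kept while the remaining bits shift; any overflow is reflected
   only in the CC computed above. */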
4112 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4114 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4118 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4120 tcg_gen_sar_i64(o->out, o->in1, o->in2);
4124 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4126 tcg_gen_shr_i64(o->out, o->in1, o->in2);
4130 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4132 gen_helper_sfpc(cpu_env, o->in2);
4136 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4138 gen_helper_sfas(cpu_env, o->in2);
4142 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4144 /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4145 tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4146 gen_helper_srnm(cpu_env, o->addr1);
4150 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4152 /* Bits 0-55 are ignored. */
4153 tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4154 gen_helper_srnm(cpu_env, o->addr1);
4158 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4160 TCGv_i64 tmp = tcg_temp_new_i64();
4162 /* Bits other than 61-63 are ignored. */
4163 tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4165 /* No need to call a helper; we don't implement DFP. */
4166 tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4167 tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4168 tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4170 tcg_temp_free_i64(tmp);
4174 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4176 tcg_gen_extrl_i64_i32(cc_op, o->in1);
4177 tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4180 tcg_gen_shri_i64(o->in1, o->in1, 24);
4181 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4185 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4187 int b1 = get_field(s->fields, b1);
4188 int d1 = get_field(s->fields, d1);
4189 int b2 = get_field(s->fields, b2);
4190 int d2 = get_field(s->fields, d2);
4191 int r3 = get_field(s->fields, r3);
4192 TCGv_i64 tmp = tcg_temp_new_i64();
4194 /* fetch all operands first */
4195 o->in1 = tcg_temp_new_i64();
4196 tcg_gen_addi_i64(o->in1, regs[b1], d1);
4197 o->in2 = tcg_temp_new_i64();
4198 tcg_gen_addi_i64(o->in2, regs[b2], d2);
4199 o->addr1 = get_address(s, 0, r3, 0);
4201 /* load the third operand into r3 before modifying anything */
4202 tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4204 /* subtract CPU timer from first operand and store in GR0 */
4205 gen_helper_stpt(tmp, cpu_env);
4206 tcg_gen_sub_i64(regs[0], o->in1, tmp);
4208 /* store second operand in GR1 */
4209 tcg_gen_mov_i64(regs[1], o->in2);
4211 tcg_temp_free_i64(tmp);
4215 #ifndef CONFIG_USER_ONLY
4216 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4218 tcg_gen_shri_i64(o->in2, o->in2, 4);
4219 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4223 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4225 gen_helper_sske(cpu_env, o->in1, o->in2);
4229 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4231 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4232 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4233 return DISAS_PC_STALE_NOCHAIN;
4236 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4238 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4243 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4245 gen_helper_stck(o->out, cpu_env);
4246 /* ??? We don't implement clock states. */
4247 gen_op_movi_cc(s, 0);
4251 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4253 TCGv_i64 c1 = tcg_temp_new_i64();
4254 TCGv_i64 c2 = tcg_temp_new_i64();
4255 TCGv_i64 todpr = tcg_temp_new_i64();
4256 gen_helper_stck(c1, cpu_env);
4257 /* 16-bit value stored in a uint32_t (only valid bits set) */
4258 tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4259 /* Shift the 64-bit value into its place as a zero-extended
4260 104-bit value. Note that "bit positions 64-103 are always
4261 non-zero so that they compare differently to STCK"; we set
4262 the least significant bit to 1. */
4263 tcg_gen_shli_i64(c2, c1, 56);
4264 tcg_gen_shri_i64(c1, c1, 8);
4265 tcg_gen_ori_i64(c2, c2, 0x10000);
4266 tcg_gen_or_i64(c2, c2, todpr);
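/* Resulting 16-byte layout: c1 holds a zero epoch index plus TOD
   bits 0-55; c2 holds TOD bits 56-63, the forced-nonzero bit, and
   the TOD programmable register in its low two bytes. */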
4267 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4268 tcg_gen_addi_i64(o->in2, o->in2, 8);
4269 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4270 tcg_temp_free_i64(c1);
4271 tcg_temp_free_i64(c2);
4272 tcg_temp_free_i64(todpr);
4273 /* ??? We don't implement clock states. */
4274 gen_op_movi_cc(s, 0);
4278 #ifndef CONFIG_USER_ONLY
4279 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4281 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
4282 gen_helper_sck(cc_op, cpu_env, o->in1);
4287 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4289 gen_helper_sckc(cpu_env, o->in2);
4293 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4295 gen_helper_sckpf(cpu_env, regs[0]);
4299 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4301 gen_helper_stckc(o->out, cpu_env);
4305 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4307 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4308 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4309 gen_helper_stctg(cpu_env, r1, o->in2, r3);
4310 tcg_temp_free_i32(r1);
4311 tcg_temp_free_i32(r3);
4315 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4317 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4318 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4319 gen_helper_stctl(cpu_env, r1, o->in2, r3);
4320 tcg_temp_free_i32(r1);
4321 tcg_temp_free_i32(r3);
4325 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4327 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4331 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4333 gen_helper_spt(cpu_env, o->in2);
4337 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4339 gen_helper_stfl(cpu_env);
4343 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4345 gen_helper_stpt(o->out, cpu_env);
4349 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4351 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4356 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4358 gen_helper_spx(cpu_env, o->in2);
4362 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4364 gen_helper_xsch(cpu_env, regs[1]);
4369 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4371 gen_helper_csch(cpu_env, regs[1]);
4376 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4378 gen_helper_hsch(cpu_env, regs[1]);
4383 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4385 gen_helper_msch(cpu_env, regs[1], o->in2);
4390 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4392 gen_helper_rchp(cpu_env, regs[1]);
4397 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4399 gen_helper_rsch(cpu_env, regs[1]);
4404 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4406 gen_helper_sal(cpu_env, regs[1]);
4410 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4412 gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4416 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4418 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4419 gen_op_movi_cc(s, 3);
4423 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4425 /* The instruction is suppressed if not provided. */
4429 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4431 gen_helper_ssch(cpu_env, regs[1], o->in2);
4436 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4438 gen_helper_stsch(cpu_env, regs[1], o->in2);
4443 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4445 gen_helper_stcrw(cpu_env, o->in2);
4450 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4452 gen_helper_tpi(cc_op, cpu_env, o->addr1);
4457 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4459 gen_helper_tsch(cpu_env, regs[1], o->in2);
4464 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4466 gen_helper_chsc(cpu_env, o->in2);
4471 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4473 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4474 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4478 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4480 uint64_t i2 = get_field(s->fields, i2);
4483 /* It is important to do what the instruction name says: STORE THEN.
4484 If we let the output hook perform the store and then fault and
4485 restart, we would have the wrong SYSTEM MASK in place. */
4486 t = tcg_temp_new_i64();
4487 tcg_gen_shri_i64(t, psw_mask, 56);
4488 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4489 tcg_temp_free_i64(t);
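/* Opcode 0xac is STNSM, which ANDs the immediate into the system
   mask; otherwise this is STOSM, which ORs it in. */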
4491 if (s->fields->op == 0xac) {
4492 tcg_gen_andi_i64(psw_mask, psw_mask,
4493 (i2 << 56) | 0x00ffffffffffffffull);
4495 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4498 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4499 return DISAS_PC_STALE_NOCHAIN;
4502 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4504 o->addr1 = get_address(s, 0, get_field(s->fields, r2), 0);
4505 tcg_gen_qemu_st_tl(o->in1, o->addr1, MMU_REAL_IDX, s->insn->data);
4507 if (s->base.tb->flags & FLAG_MASK_PER) {
4509 gen_helper_per_store_real(cpu_env);
4515 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4517 gen_helper_stfle(cc_op, cpu_env, o->in2);
4522 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4524 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4528 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4530 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4534 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4536 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4540 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4542 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4546 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4548 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4549 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4550 gen_helper_stam(cpu_env, r1, o->in2, r3);
4551 tcg_temp_free_i32(r1);
4552 tcg_temp_free_i32(r3);
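/* STCM: store the bytes of R1 selected by the M3 mask, high byte
   first; a contiguous mask collapses into a single wider store. */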
4556 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4558 int m3 = get_field(s->fields, m3);
4559 int pos, base = s->insn->data;
4560 TCGv_i64 tmp = tcg_temp_new_i64();
4562 pos = base + ctz32(m3) * 8;
4565 /* Effectively a 32-bit store. */
4566 tcg_gen_shri_i64(tmp, o->in1, pos);
4567 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4573 /* Effectively a 16-bit store. */
4574 tcg_gen_shri_i64(tmp, o->in1, pos);
4575 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4582 /* Effectively an 8-bit store. */
4583 tcg_gen_shri_i64(tmp, o->in1, pos);
4584 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4588 /* This is going to be a sequence of shifts and stores. */
4589 pos = base + 32 - 8;
4592 tcg_gen_shri_i64(tmp, o->in1, pos);
4593 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4594 tcg_gen_addi_i64(o->in2, o->in2, 1);
4596 m3 = (m3 << 1) & 0xf;
4601 tcg_temp_free_i64(tmp);
4605 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4607 int r1 = get_field(s->fields, r1);
4608 int r3 = get_field(s->fields, r3);
4609 int size = s->insn->data;
4610 TCGv_i64 tsize = tcg_const_i64(size);
4614 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4616 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4621 tcg_gen_add_i64(o->in2, o->in2, tsize);
4625 tcg_temp_free_i64(tsize);
4629 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4631 int r1 = get_field(s->fields, r1);
4632 int r3 = get_field(s->fields, r3);
4633 TCGv_i64 t = tcg_temp_new_i64();
4634 TCGv_i64 t4 = tcg_const_i64(4);
4635 TCGv_i64 t32 = tcg_const_i64(32);
4638 tcg_gen_shr_i64(t, regs[r1], t32);
4639 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4643 tcg_gen_add_i64(o->in2, o->in2, t4);
4647 tcg_temp_free_i64(t);
4648 tcg_temp_free_i64(t4);
4649 tcg_temp_free_i64(t32);
4653 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4655 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4656 gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4657 } else if (HAVE_ATOMIC128) {
4658 gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4660 gen_helper_exit_atomic(cpu_env);
4661 return DISAS_NORETURN;
4666 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4668 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4669 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4671 gen_helper_srst(cpu_env, r1, r2);
4673 tcg_temp_free_i32(r1);
4674 tcg_temp_free_i32(r2);
4679 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4681 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4682 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4684 gen_helper_srstu(cpu_env, r1, r2);
4686 tcg_temp_free_i32(r1);
4687 tcg_temp_free_i32(r2);
4692 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4694 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4698 static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
4703 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4705 /* The !borrow flag is the msb of CC. Since we want the inverse of
4706 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4707 disas_jcc(s, &cmp, 8 | 4);
4708 borrow = tcg_temp_new_i64();
4710 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4712 TCGv_i32 t = tcg_temp_new_i32();
4713 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4714 tcg_gen_extu_i32_i64(borrow, t);
4715 tcg_temp_free_i32(t);
4719 tcg_gen_sub_i64(o->out, o->out, borrow);
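/* E.g. with a borrow pending (CC 0 or 1), 0 - 0 - borrow yields
   all ones, analogous to x86's SBB with the carry flag set. */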
4720 tcg_temp_free_i64(borrow);
4724 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4731 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4732 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4733 tcg_temp_free_i32(t);
4735 t = tcg_const_i32(s->ilen);
4736 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4737 tcg_temp_free_i32(t);
4739 gen_exception(EXCP_SVC);
4740 return DISAS_NORETURN;
4743 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4747 cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4748 cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4749 gen_op_movi_cc(s, cc);
4753 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4755 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4760 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4762 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4767 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4769 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4774 #ifndef CONFIG_USER_ONLY
4776 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4778 gen_helper_testblock(cc_op, cpu_env, o->in2);
4783 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4785 gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4792 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4794 TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4795 gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4796 tcg_temp_free_i32(l1);
4801 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4803 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4804 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4805 tcg_temp_free_i32(l);
4810 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4812 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4813 return_low128(o->out2);
4818 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4820 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4821 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4822 tcg_temp_free_i32(l);
4827 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4829 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4830 gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4831 tcg_temp_free_i32(l);
4836 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4838 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4839 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4840 TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4841 TCGv_i32 tst = tcg_temp_new_i32();
4842 int m3 = get_field(s->fields, m3);
4844 if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4848 tcg_gen_movi_i32(tst, -1);
4850 tcg_gen_extrl_i64_i32(tst, regs[0]);
4851 if (s->insn->opc & 3) {
4852 tcg_gen_ext8u_i32(tst, tst);
4854 tcg_gen_ext16u_i32(tst, tst);
4857 gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4859 tcg_temp_free_i32(r1);
4860 tcg_temp_free_i32(r2);
4861 tcg_temp_free_i32(sizes);
4862 tcg_temp_free_i32(tst);
4867 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4869 TCGv_i32 t1 = tcg_const_i32(0xff);
4870 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4871 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4872 tcg_temp_free_i32(t1);
4877 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4879 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4880 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4881 tcg_temp_free_i32(l);
4885 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4887 int l1 = get_field(s->fields, l1) + 1;
4890 /* The length must not exceed 32 bytes. */
4891 if (l1 > 32) {
4892 gen_program_exception(s, PGM_SPECIFICATION);
4893 return DISAS_NORETURN;
4895 l = tcg_const_i32(l1);
4896 gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4897 tcg_temp_free_i32(l);
4902 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4904 int l1 = get_field(s->fields, l1) + 1;
4907 /* The length must be even and must not exceed 64 bytes. */
4908 if ((l1 & 1) || (l1 > 64)) {
4909 gen_program_exception(s, PGM_SPECIFICATION);
4910 return DISAS_NORETURN;
4912 l = tcg_const_i32(l1);
4913 gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4914 tcg_temp_free_i32(l);
4920 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4922 int d1 = get_field(s->fields, d1);
4923 int d2 = get_field(s->fields, d2);
4924 int b1 = get_field(s->fields, b1);
4925 int b2 = get_field(s->fields, b2);
4926 int l = get_field(s->fields, l1);
4929 o->addr1 = get_address(s, 0, b1, d1);
4931 /* If the addresses are identical, this is a store/memset of zero. */
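/* (XC of a field with itself XORs every byte with itself; this is
   the classic s390 idiom for clearing storage.) */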
4932 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4933 o->in2 = tcg_const_i64(0);
4937 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4940 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4944 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4947 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4951 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4954 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4958 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4960 gen_op_movi_cc(s, 0);
4964 /* But in general we'll defer to a helper. */
4965 o->in2 = get_address(s, 0, b2, d2);
4966 t32 = tcg_const_i32(l);
4967 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4968 tcg_temp_free_i32(t32);
4973 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4975 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4979 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4981 int shift = s->insn->data & 0xff;
4982 int size = s->insn->data >> 8;
4983 uint64_t mask = ((1ull << size) - 1) << shift;
4986 tcg_gen_shli_i64(o->in2, o->in2, shift);
4987 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4989 /* Produce the CC from only the bits manipulated. */
4990 tcg_gen_andi_i64(cc_dst, o->out, mask);
4991 set_cc_nz_u64(s, cc_dst);
4995 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4997 o->in1 = tcg_temp_new_i64();
4999 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5000 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
5002 /* Perform the atomic operation in memory. */
5003 tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
5007 /* Recompute also for atomic case: needed for setting CC. */
5008 tcg_gen_xor_i64(o->out, o->in1, o->in2);
5010 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5011 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
5016 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
5018 o->out = tcg_const_i64(0);
5022 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
5024 o->out = tcg_const_i64(0);
5030 #ifndef CONFIG_USER_ONLY
5031 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
5033 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5035 gen_helper_clp(cpu_env, r2);
5036 tcg_temp_free_i32(r2);
5041 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
5043 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5044 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5046 gen_helper_pcilg(cpu_env, r1, r2);
5047 tcg_temp_free_i32(r1);
5048 tcg_temp_free_i32(r2);
5053 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
5055 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5056 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5058 gen_helper_pcistg(cpu_env, r1, r2);
5059 tcg_temp_free_i32(r1);
5060 tcg_temp_free_i32(r2);
5065 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
5067 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5068 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5070 gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
5071 tcg_temp_free_i32(ar);
5072 tcg_temp_free_i32(r1);
5077 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
5079 gen_helper_sic(cpu_env, o->in1, o->in2);
5083 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
5085 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5086 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5088 gen_helper_rpcit(cpu_env, r1, r2);
5089 tcg_temp_free_i32(r1);
5090 tcg_temp_free_i32(r2);
5095 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
5097 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5098 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
5099 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5101 gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
5102 tcg_temp_free_i32(ar);
5103 tcg_temp_free_i32(r1);
5104 tcg_temp_free_i32(r3);
5109 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5111 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5112 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5114 gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
5115 tcg_temp_free_i32(ar);
5116 tcg_temp_free_i32(r1);
5122 #include "translate_vx.inc.c"
5124 /* ====================================================================== */
5125 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5126 the original inputs), update the various cc data structures in order to
5127 be able to compute the new condition code. */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */
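/*
 * A rough sketch of how these slot together: each entry in insn-data.def
 * names one in1_*, one in2_*, one prep_*, one wout_* and one cout_* helper,
 * and translate_one() below invokes them in exactly that order around the
 * op_* worker.  A "prep" that installs a TCG global (e.g. prep_r1) makes
 * the matching "wout" trivial, while prep_new pairs with a wout_* helper
 * that copies the fresh temporary to its final home.
 */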
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = load_freg(get_field(f, r1));
    o->out2 = load_freg(get_field(f, r1) + 2);
}
#define SPEC_prep_x1 SPEC_r1_f128
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */
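/*
 * Note that translate_one() only runs the wout and cout helpers when the
 * op_* worker did not end the TB with DISAS_NORETURN, so a faulting
 * instruction never writes back a partial result.
 */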
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */
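/*
 * Most in1_* helpers copy the source into a fresh temporary (load_reg or
 * tcg_temp_new_i64), which translate_one() frees afterwards.  The _o
 * ("original") variants instead alias the TCG global directly and set
 * g_in1 so that the cleanup code knows not to free it.
 */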
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg(get_field(f, r1));
}
#define SPEC_in1_f1 0

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg(get_field(f, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg(get_field(f, r3));
}
#define SPEC_in1_f3 0

static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */
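/*
 * The in2_m2_* and in2_mri2_* loaders below first compute the effective
 * address into o->in2 and then reuse that same temporary as the load
 * destination, so no extra TCG temp is needed for the address.
 */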
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg(get_field(f, r2));
}
#define SPEC_in2_f2 0

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg(get_field(f, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0
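/*
 * in2_ri2 turns the halfword-scaled I2 immediate into an absolute address
 * relative to the current instruction, which is what the relative-addressing
 * instructions (e.g. LOAD RELATIVE LONG) expect.
 */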
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */
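/*
 * Sketch of the expansion: insn-data.def is included three times with
 * different definitions of E().  A hypothetical entry
 *     C(0x1234, FOO, RRE, Z, r1, r2, new, r1, foo, nz64)
 * would first contribute "insn_FOO," to DisasInsnEnum, then a DisasInsn
 * initializer wiring up in1_r1/in2_r2/prep_new/wout_r1/cout_nz64/op_foo,
 * and finally "case 0x1234: return &insn_info[insn_FOO];" in lookup_opc().
 */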
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};
#undef E

#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
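/*
 * The "0" placeholders in insn-data.def rely on token pasting: a table
 * entry with, say, no wout handler expands to wout_0, which the defines
 * above turn into NULL, and SPEC_wout_0 contributes nothing to .spec.
 */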
/* Give smaller names to the various facilities. */
#define FAC_Z S390_FEAT_ZARCH
#define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP S390_FEAT_DFP
#define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE S390_FEAT_EXECUTE_EXT
#define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
#define FAC_HW S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE S390_FEAT_STFLE
#define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH S390_FEAT_DAT_ENH
#define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V S390_FEAT_VECTOR /* vector facility */
#define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);
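    /*
     * E.g. a 4-bit register number at big-endian bit positions 8..11 of
     * the insn (f->beg = 8, f->size = 4) becomes (insn << 8) >> 60,
     * leaving the field right-aligned in r.
     */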
    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap. */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths. */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE. */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
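    /*
     * The shift idioms below pick out the secondary opcode without a mask:
     * for the 0xb2..0xb9 families the second opcode byte sits in bits
     * 8..15, so (insn << 8) >> 56 isolates it, while the RIL/RI formats
     * keep a 4-bit OP2 in bits 12..15, hence (insn << 12) >> 60.
     */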
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
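/*
 * AFP (additional floating-point) registers are everything beyond the
 * original four: f0, f2, f4 and f6 are always architected, so any odd
 * register or one above 6 requires the AFP-register control to be set.
 */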
static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}

static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return !(reg & 0x2);
}
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasFields f;
    DisasOps o = {};

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Emit insn_start now that we know the ILEN. */
    tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return DISAS_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            return DISAS_NORETURN;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(&f, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(&f, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(&f, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                return DISAS_NORETURN;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                return DISAS_NORETURN;
            }
        }
    }

    /* Check for insn specification exceptions. */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(&f, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(&f, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(&f, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(&f, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(&f, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;

    /* Implement the instruction. */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &f, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers. */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction. */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
    dc->do_debug = dc->base.singlestep_enabled;
}

static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
}

static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /*
     * Emit an insn_start to accompany the breakpoint exception.
     * The ILEN value is a dummy, since this does not result in
     * an s390x exception, but an internal qemu exception which
     * brings us back to interact with the gdbstub.
     */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 2);

    dc->base.is_jmp = DISAS_PC_STALE;
    dc->do_debug = true;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size does the right thing. */
    dc->base.pc_next += 2;
    return true;
}

static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        uint64_t page_start;

        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return. */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(dc) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately log_target_disas can't use host memory. */
        qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
    } else {
        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
    }
}

static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start = s390x_tr_tb_start,
    .insn_start = s390x_tr_insn_start,
    .breakpoint_check = s390x_tr_breakpoint_check,
    .translate_insn = s390x_tr_translate_insn,
    .tb_stop = s390x_tr_tb_stop,
    .disas_log = s390x_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
}

void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update the CC opcode if it is not already up-to-date. */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }

    /* Record ILEN. */
    env->int_pgm_ilen = data[2];
}