 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool do_debug;
};
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0].d);
}
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, TCGMemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
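/*
 * Worked example (illustrative, not part of the original comment): for
 * reg = 1, enr = 2, es = MO_16 we get bytes = 2 and offs = 4.  On a
 * little endian host the XOR with (8 - bytes) = 6 turns that into
 * offs = 2, which is where halfword 2 of the big-endian element
 * numbering lives within the host's 8-byte double word.
 */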
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
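/*
 * Example (illustrative): with regs[1] = 0xdeadbeef00000000 and
 * v = 0x12345678, the deposit above yields 0xdeadbeef12345678;
 * bits 32-63 are preserved, as architected for 32-bit writes.
 */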
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
static int get_mem_index(DisasContext *s)
{
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
}
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Update the PSW.  */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}
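/*
 * Example (illustrative): in 24-bit mode, base 0x00fffffe plus imm 4
 * becomes 0x01000002, and the AND with 0x00ffffff wraps it to
 * 0x000002; in 31-bit mode the 0x7fffffff mask performs the analogous
 * wrap below 2^31.
 */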
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed. If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADD_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADD_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
            (tb_cflags(s->base.tb) & CF_LAST_IO) ||
            (s->base.tb->flags & FLAG_MASK_PER);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
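/*
 * Example (illustrative): after a signed comparison, branch mask 0xc
 * (CC 0 or CC 1, i.e. EQ | LT) indexes ltgt_cond[0xc] == TCG_COND_LE,
 * so the branch can be emitted as a single brcond on LE.
 */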
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as don't-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }
    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
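/*
 * Worked example for the generic (8 >> cc) & mask case above
 * (illustrative): with cc == 2 and mask == 0x6, (8 >> 2) & 0x6 == 0x2,
 * which is non-zero, so the branch is taken; bit 1 of the mask is
 * exactly the bit that selects CC 2.
 */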
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}
/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
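/*
 * Example (illustrative, hypothetical entry): a line such as
 * F2(RX_a, R(1,8), BXD(2)) in insn-format.def expands, via F2 -> F0,
 * to the single enumerator FMT_RX_a here; the field arguments only
 * matter for the format_info[] table below.
 */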
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
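/*
 * Example (illustrative): BXD(2) describes the classic base-index-
 * displacement operand: b2 in instruction bits 16-19, x2 in bits
 * 12-15 and d2 in bits 20-31 (sizes 4, 4 and 12).  Type 1 marks
 * signed immediates and type 2 the split 20-bit long displacements.
 */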
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};
/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */
#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4
/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return DISAS_NEXT;
}
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that
       would create that comparison is 3.  Feeding the generated comparison
       to setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return DISAS_NEXT;
}
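/*
 * Example (illustrative): CC 2 ("zero, carry") and CC 3 ("nonzero,
 * carry") are exactly the CC values with the msb set, and branch mask
 * 3 (0b0011) selects precisely CC 2 and CC 3, so the setcond above
 * computes the carry-in as 0 or 1.
 */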
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
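/*
 * Example (illustrative): for an AND-immediate on the high-low halfword
 * (NIHL-style), insn->data would be (16 << 8) | 32, i.e. size = 16 and
 * shift = 32, giving mask = 0x0000ffff00000000.  The immediate is
 * shifted into place and every other bit of in2 is forced to 1, so the
 * AND leaves the rest of the register untouched.
 */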
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
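/*
 * Illustrative layout of the 24-bit-mode link word built above, for a
 * 4-byte BAL with pc_tmp = 0x1004: bits 30-31 hold the ILC (4 / 2 = 2),
 * bits 28-29 the condition code, bits 24-27 the program mask (taken
 * from psw_mask), and bits 0-23 the return address 0x001004.
 */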
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s->fields, i2));
}
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s->fields, m3);
    uint8_t m4 = get_field(s->fields, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
}
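/*
 * Example (illustrative): deposit32(m3, 4, 4, m4) packs the two 4-bit
 * fields into one constant; m3 = 5 and m4 = 1 yield 0x15, so a single
 * TCGv_i32 carries both masks to the helper, which can unpack them as
 * m3 = val & 0xf and m4 = val >> 4.
 */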
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);
    return DISAS_NEXT;
}
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}
static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}
static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
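/*
 * Example (illustrative): when memory held the expected value, the
 * cmpxchg stored the new value and the NE setcond yields 0, the
 * architected CC 0 for "equal, swap performed"; otherwise it yields
 * CC 1.
 */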
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;
}
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    TCGMemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    set_cc_static(s);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}

static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return DISAS_PC_CC_UPDATED;
}
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
    return_low128(o->out2);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
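/*
 * Worked example (illustrative): for IN = 0x0010000000000000, clzi
 * gives R1 = 11; 0x8000000000000000 >> 11 recovers the found bit, and
 * R1+1 = IN & ~(found bit) = 0.  For IN = 0 the shift amount would be
 * 64, but since cc_dst is then 0 the andc still produces the
 * architected 0.
 */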
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
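/*
 * Example (illustrative): ICM r1,0x5,addr loads two bytes; mask 0b0101
 * selects byte positions 1 and 3 of the 32-bit field, so the loop
 * inserts the first byte at bits 16-23 and the second at bits 0-7,
 * and ccm becomes 0x00ff00ff for the CC computation.
 */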
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}

static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
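/*
 * Example (illustrative): with CC = 2 and a program mask of 0x9, t1
 * becomes (2 << 4) | 9 = 0x29, which is deposited into bits 24-31 of
 * r1, the architected "condition code and program mask" byte.
 */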
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}

static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}

static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
    int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
    int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMCTR:
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    }

    t_r1 = tcg_const_i32(r1);
    t_r2 = tcg_const_i32(r2);
    t_r3 = tcg_const_i32(r3);
    type = tcg_const_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r2);
    tcg_temp_free_i32(t_r3);
    tcg_temp_free_i32(type);
    return DISAS_NEXT;
}
2735 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2737 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2742 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2744 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2749 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2751 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2756 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2758 /* The real output is the original value in memory; the atomic
2759 fetch-and-add returns it in o->in2. */
2760 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2761 s->insn->data | MO_ALIGN);
2762 /* However, we need to recompute the addition for setting CC. */
2763 tcg_gen_add_i64(o->out, o->in1, o->in2);
2767 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2769 /* The real output is the original value in memory; the atomic
2770 fetch-and-AND returns it in o->in2. */
2771 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2772 s->insn->data | MO_ALIGN);
2773 /* However, we need to recompute the operation for setting CC. */
2774 tcg_gen_and_i64(o->out, o->in1, o->in2);
2778 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2780 /* The real output is the original value in memory; the atomic
2781 fetch-and-OR returns it in o->in2. */
2782 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2783 s->insn->data | MO_ALIGN);
2784 /* However, we need to recompute the operation for setting CC. */
2785 tcg_gen_or_i64(o->out, o->in1, o->in2);
2789 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2791 /* The real output is the original value in memory; the atomic
2792 fetch-and-XOR returns it in o->in2. */
2793 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2794 s->insn->data | MO_ALIGN);
2795 /* However, we need to recompute the operation for setting CC. */
2796 tcg_gen_xor_i64(o->out, o->in1, o->in2);
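/* All four LOAD AND <op> handlers above share one pattern: the old
   memory value returned by the atomic op (left in o->in2) is the
   architected result that the output hook writes back to r1, while
   the redone register operation exists only to feed the CC
   computation.  */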
2800 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2802 gen_helper_ldeb(o->out, cpu_env, o->in2);
2806 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2808 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2811 return DISAS_NORETURN;
2813 gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2814 tcg_temp_free_i32(m34);
2818 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2820 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2823 return DISAS_NORETURN;
2825 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
2826 tcg_temp_free_i32(m34);
2830 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2832 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2835 return DISAS_NORETURN;
2837 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
2838 tcg_temp_free_i32(m34);
2842 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2844 gen_helper_lxdb(o->out, cpu_env, o->in2);
2845 return_low128(o->out2);
2849 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2851 gen_helper_lxeb(o->out, cpu_env, o->in2);
2852 return_low128(o->out2);
2856 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2858 tcg_gen_shli_i64(o->out, o->in2, 32);
2862 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2864 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2868 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2870 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2874 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2876 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2880 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2882 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2886 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2888 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2892 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2894 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2898 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2900 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2904 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2906 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2910 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2912 TCGLabel *lab = gen_new_label();
2913 store_reg32_i64(get_field(s->fields, r1), o->in2);
2914 /* The value is stored even in case of trap. */
2915 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2921 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2923 TCGLabel *lab = gen_new_label();
2924 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2925 /* The value is stored even in case of trap. */
2926 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2932 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2934 TCGLabel *lab = gen_new_label();
2935 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2936 /* The value is stored even in case of trap. */
2937 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2943 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2945 TCGLabel *lab = gen_new_label();
2946 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2947 /* The value is stored even in case of trap. */
2948 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2954 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2956 TCGLabel *lab = gen_new_label();
2957 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2958 /* The value is stored even in case of trap. */
2959 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2965 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2969 disas_jcc(s, &c, get_field(s->fields, m3));
2972 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2976 TCGv_i32 t32 = tcg_temp_new_i32();
2979 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2982 t = tcg_temp_new_i64();
2983 tcg_gen_extu_i32_i64(t, t32);
2984 tcg_temp_free_i32(t32);
2986 z = tcg_const_i64(0);
2987 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2988 tcg_temp_free_i64(t);
2989 tcg_temp_free_i64(z);
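/* LOAD ON CONDITION therefore never branches: for a 64-bit
   comparison a single movcond picks the new value (o->in2) when the
   condition holds and the old contents (o->in1) otherwise; for a
   32-bit comparison the condition is first materialized as a 0/1
   value and the movcond then tests that against zero.  */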
2995 #ifndef CONFIG_USER_ONLY
2996 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2998 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2999 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3000 gen_helper_lctl(cpu_env, r1, o->in2, r3);
3001 tcg_temp_free_i32(r1);
3002 tcg_temp_free_i32(r3);
3003 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3004 return DISAS_PC_STALE_NOCHAIN;
3007 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3009 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3010 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3011 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3012 tcg_temp_free_i32(r1);
3013 tcg_temp_free_i32(r3);
3014 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3015 return DISAS_PC_STALE_NOCHAIN;
3018 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3020 gen_helper_lra(o->out, cpu_env, o->in2);
3025 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3027 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3031 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3035 per_breaking_event(s);
3037 t1 = tcg_temp_new_i64();
3038 t2 = tcg_temp_new_i64();
3039 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3040 MO_TEUL | MO_ALIGN_8);
3041 tcg_gen_addi_i64(o->in2, o->in2, 4);
3042 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3043 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
3044 tcg_gen_shli_i64(t1, t1, 32);
3045 gen_helper_load_psw(cpu_env, t1, t2);
3046 tcg_temp_free_i64(t1);
3047 tcg_temp_free_i64(t2);
3048 return DISAS_NORETURN;
3051 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3055 per_breaking_event(s);
3057 t1 = tcg_temp_new_i64();
3058 t2 = tcg_temp_new_i64();
3059 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3060 MO_TEQ | MO_ALIGN_8);
3061 tcg_gen_addi_i64(o->in2, o->in2, 8);
3062 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3063 gen_helper_load_psw(cpu_env, t1, t2);
3064 tcg_temp_free_i64(t1);
3065 tcg_temp_free_i64(t2);
3066 return DISAS_NORETURN;
3070 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3072 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3073 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3074 gen_helper_lam(cpu_env, r1, o->in2, r3);
3075 tcg_temp_free_i32(r1);
3076 tcg_temp_free_i32(r3);
3080 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3082 int r1 = get_field(s->fields, r1);
3083 int r3 = get_field(s->fields, r3);
3086 /* Only one register to read. */
3087 t1 = tcg_temp_new_i64();
3088 if (unlikely(r1 == r3)) {
3089 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3090 store_reg32_i64(r1, t1);
3095 /* First load the values of the first and last registers to trigger
3096 possible page faults. */
3097 t2 = tcg_temp_new_i64();
3098 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3099 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3100 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3101 store_reg32_i64(r1, t1);
3102 store_reg32_i64(r3, t2);
3104 /* Only two registers to read. */
3105 if (((r1 + 1) & 15) == r3) {
3111 /* Then load the remaining registers. Page fault can't occur. */
3113 tcg_gen_movi_i64(t2, 4);
3116 tcg_gen_add_i64(o->in2, o->in2, t2);
3117 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3118 store_reg32_i64(r1, t1);
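/* An illustrative fault scenario for the first/last trick above:
   if the register range straddles a page boundary, loading the first
   and last words up front takes any page fault before a single
   register has been modified, so the insn can restart cleanly.  The
   range spans at most 64 bytes and hence at most those two pages, so
   the remaining loads cannot fault.  */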
3126 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3128 int r1 = get_field(s->fields, r1);
3129 int r3 = get_field(s->fields, r3);
3132 /* Only one register to read. */
3133 t1 = tcg_temp_new_i64();
3134 if (unlikely(r1 == r3)) {
3135 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3136 store_reg32h_i64(r1, t1);
3141 /* First load the values of the first and last registers to trigger
3142 possible page faults. */
3143 t2 = tcg_temp_new_i64();
3144 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3145 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3146 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3147 store_reg32h_i64(r1, t1);
3148 store_reg32h_i64(r3, t2);
3150 /* Only two registers to read. */
3151 if (((r1 + 1) & 15) == r3) {
3157 /* Then load the remaining registers. Page fault can't occur. */
3159 tcg_gen_movi_i64(t2, 4);
3162 tcg_gen_add_i64(o->in2, o->in2, t2);
3163 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3164 store_reg32h_i64(r1, t1);
3172 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3174 int r1 = get_field(s->fields, r1);
3175 int r3 = get_field(s->fields, r3);
3178 /* Only one register to read. */
3179 if (unlikely(r1 == r3)) {
3180 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3184 /* First load the values of the first and last registers to trigger
3185 possible page faults. */
3186 t1 = tcg_temp_new_i64();
3187 t2 = tcg_temp_new_i64();
3188 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3189 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3190 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3191 tcg_gen_mov_i64(regs[r1], t1);
3194 /* Only two registers to read. */
3195 if (((r1 + 1) & 15) == r3) {
3200 /* Then load the remaining registers. Page fault can't occur. */
3202 tcg_gen_movi_i64(t1, 8);
3205 tcg_gen_add_i64(o->in2, o->in2, t1);
3206 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3213 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3216 TCGMemOp mop = s->insn->data;
3218 /* In a parallel context, stop the world and single step. */
3219 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3222 gen_exception(EXCP_ATOMIC);
3223 return DISAS_NORETURN;
3226 /* In a serial context, perform the two loads ... */
3227 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
3228 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3229 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3230 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3231 tcg_temp_free_i64(a1);
3232 tcg_temp_free_i64(a2);
3234 /* ... and indicate that we performed them while interlocked. */
3235 gen_op_movi_cc(s, 0);
3239 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3241 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3242 gen_helper_lpq(o->out, cpu_env, o->in2);
3243 } else if (HAVE_ATOMIC128) {
3244 gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3246 gen_helper_exit_atomic(cpu_env);
3247 return DISAS_NORETURN;
3249 return_low128(o->out2);
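/* LOAD PAIR FROM QUADWORD needs one 16-byte atomic read.  Serially
   the plain helper suffices; with parallel CPUs we use the 128-bit
   atomic helper when the host provides one, and otherwise stop the
   world via exit_atomic so the insn is retried exclusively.  */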
3253 #ifndef CONFIG_USER_ONLY
3254 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3256 gen_helper_lura(o->out, cpu_env, o->in2);
3260 static DisasJumpType op_lurag(DisasContext *s, DisasOps *o)
3262 gen_helper_lurag(o->out, cpu_env, o->in2);
3267 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3269 tcg_gen_andi_i64(o->out, o->in2, -256);
3273 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3275 const int64_t block_size = (1ull << (get_field(s->fields, m3) + 6));
3277 if (get_field(s->fields, m3) > 6) {
3278 gen_program_exception(s, PGM_SPECIFICATION);
3279 return DISAS_NORETURN;
3282 tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3283 tcg_gen_neg_i64(o->addr1, o->addr1);
3284 tcg_gen_movi_i64(o->out, 16);
3285 tcg_gen_umin_i64(o->out, o->out, o->addr1);
3286 gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
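/* The ori/neg pair computes the distance to the next block boundary:
   or-ing with -block_size forces every bit above the in-block offset
   to one, so negating yields block_size - (addr % block_size).
   Illustrative values: addr1 = 0x1ff8 with a 4K block gives
   0x1ff8 | -4096 = ...fff8, negated = 8, and min(16, 8) = 8.  */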
3290 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3293 o->g_out = o->g_in2;
3299 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3301 int b2 = get_field(s->fields, b2);
3302 TCGv ar1 = tcg_temp_new_i64();
3305 o->g_out = o->g_in2;
3309 switch (s->base.tb->flags & FLAG_MASK_ASC) {
3310 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3311 tcg_gen_movi_i64(ar1, 0);
3313 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3314 tcg_gen_movi_i64(ar1, 1);
3316 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3318 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3320 tcg_gen_movi_i64(ar1, 0);
3323 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3324 tcg_gen_movi_i64(ar1, 2);
3328 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3329 tcg_temp_free_i64(ar1);
3334 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3338 o->g_out = o->g_in1;
3339 o->g_out2 = o->g_in2;
3342 o->g_in1 = o->g_in2 = false;
3346 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3348 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3349 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3350 tcg_temp_free_i32(l);
3354 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3356 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3357 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3358 tcg_temp_free_i32(l);
3362 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3364 int r1 = get_field(s->fields, r1);
3365 int r2 = get_field(s->fields, r2);
3368 /* r1 and r2 must be even. */
3369 if (r1 & 1 || r2 & 1) {
3370 gen_program_exception(s, PGM_SPECIFICATION);
3371 return DISAS_NORETURN;
3374 t1 = tcg_const_i32(r1);
3375 t2 = tcg_const_i32(r2);
3376 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3377 tcg_temp_free_i32(t1);
3378 tcg_temp_free_i32(t2);
3383 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3385 int r1 = get_field(s->fields, r1);
3386 int r3 = get_field(s->fields, r3);
3389 /* r1 and r3 must be even. */
3390 if (r1 & 1 || r3 & 1) {
3391 gen_program_exception(s, PGM_SPECIFICATION);
3392 return DISAS_NORETURN;
3395 t1 = tcg_const_i32(r1);
3396 t3 = tcg_const_i32(r3);
3397 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3398 tcg_temp_free_i32(t1);
3399 tcg_temp_free_i32(t3);
3404 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3406 int r1 = get_field(s->fields, r1);
3407 int r3 = get_field(s->fields, r3);
3410 /* r1 and r3 must be even. */
3411 if (r1 & 1 || r3 & 1) {
3412 gen_program_exception(s, PGM_SPECIFICATION);
3413 return DISAS_NORETURN;
3416 t1 = tcg_const_i32(r1);
3417 t3 = tcg_const_i32(r3);
3418 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3419 tcg_temp_free_i32(t1);
3420 tcg_temp_free_i32(t3);
3425 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3427 int r3 = get_field(s->fields, r3);
3428 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3433 #ifndef CONFIG_USER_ONLY
3434 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3436 int r1 = get_field(s->fields, l1);
3437 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3442 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3444 int r1 = get_field(s->fields, l1);
3445 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3451 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3453 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3454 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3455 tcg_temp_free_i32(l);
3459 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3461 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3462 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3463 tcg_temp_free_i32(l);
3467 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3469 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3474 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3476 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3478 return_low128(o->in2);
3482 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3484 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3485 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3486 tcg_temp_free_i32(l);
3490 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3492 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3496 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3498 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3502 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3504 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3508 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3510 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3514 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3516 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3520 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3522 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3523 return_low128(o->out2);
3527 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3529 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3530 return_low128(o->out2);
3534 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3536 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3537 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3538 tcg_temp_free_i64(r3);
3542 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3544 TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3545 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3546 tcg_temp_free_i64(r3);
3550 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3552 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3553 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3554 tcg_temp_free_i64(r3);
3558 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3560 TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3561 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3562 tcg_temp_free_i64(r3);
3566 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3569 z = tcg_const_i64(0);
3570 n = tcg_temp_new_i64();
3571 tcg_gen_neg_i64(n, o->in2);
3572 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3573 tcg_temp_free_i64(n);
3574 tcg_temp_free_i64(z);
3578 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3580 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3584 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3586 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3590 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3592 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3593 tcg_gen_mov_i64(o->out2, o->in2);
3597 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3599 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3600 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3601 tcg_temp_free_i32(l);
3606 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3608 tcg_gen_neg_i64(o->out, o->in2);
3612 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3614 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3618 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3620 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3624 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3626 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3627 tcg_gen_mov_i64(o->out2, o->in2);
3631 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3633 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3634 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3635 tcg_temp_free_i32(l);
3640 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3642 tcg_gen_or_i64(o->out, o->in1, o->in2);
3646 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3648 int shift = s->insn->data & 0xff;
3649 int size = s->insn->data >> 8;
3650 uint64_t mask = ((1ull << size) - 1) << shift;
3653 tcg_gen_shli_i64(o->in2, o->in2, shift);
3654 tcg_gen_or_i64(o->out, o->in1, o->in2);
3656 /* Produce the CC from only the bits manipulated. */
3657 tcg_gen_andi_i64(cc_dst, o->out, mask);
3658 set_cc_nz_u64(s, cc_dst);
3662 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3664 o->in1 = tcg_temp_new_i64();
3666 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3667 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3669 /* Perform the atomic operation in memory. */
3670 tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3674 /* Recompute also for the atomic case; needed for setting CC. */
3675 tcg_gen_or_i64(o->out, o->in1, o->in2);
3677 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3678 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
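/* Without the interlocked-access facility, OI is permitted to be
   non-atomic, so a plain load/or/store sequence is architecturally
   sufficient; with the facility present the update must be
   interlocked, hence the atomic fetch-or.  Either way the OR is
   redone on registers so the CC can be derived from the result.  */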
3683 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3685 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3686 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3687 tcg_temp_free_i32(l);
3691 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3693 int l2 = get_field(s->fields, l2) + 1;
3696 /* The length must not exceed 32 bytes. */
3698 gen_program_exception(s, PGM_SPECIFICATION);
3699 return DISAS_NORETURN;
3701 l = tcg_const_i32(l2);
3702 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3703 tcg_temp_free_i32(l);
3707 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3709 int l2 = get_field(s->fields, l2) + 1;
3712 /* The length must be even and must not exceed 64 bytes. */
3713 if ((l2 & 1) || (l2 > 64)) {
3714 gen_program_exception(s, PGM_SPECIFICATION);
3715 return DISAS_NORETURN;
3717 l = tcg_const_i32(l2);
3718 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3719 tcg_temp_free_i32(l);
3723 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3725 gen_helper_popcnt(o->out, o->in2);
3729 #ifndef CONFIG_USER_ONLY
3730 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3732 gen_helper_ptlb(cpu_env);
3737 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3739 int i3 = get_field(s->fields, i3);
3740 int i4 = get_field(s->fields, i4);
3741 int i5 = get_field(s->fields, i5);
3742 int do_zero = i4 & 0x80;
3743 uint64_t mask, imask, pmask;
3746 /* Adjust the arguments for the specific insn. */
3747 switch (s->fields->op2) {
3748 case 0x55: /* risbg */
3749 case 0x59: /* risbgn */
3754 case 0x5d: /* risbhg */
3757 pmask = 0xffffffff00000000ull;
3759 case 0x51: /* risblg */
3762 pmask = 0x00000000ffffffffull;
3765 g_assert_not_reached();
3768 /* MASK is the set of bits to be inserted from R2, taking care of
3769 I3/I4 wraparound. */
3772 mask ^= pmask >> i4 >> 1;
3774 mask |= ~(pmask >> i4 >> 1);
3778 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3779 insns, we need to keep the other half of the register. */
3780 imask = ~mask | ~pmask;
3788 if (s->fields->op2 == 0x5d) {
3792 /* In some cases we can implement this with extract. */
3793 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3794 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3798 /* In some cases we can implement this with deposit. */
3799 if (len > 0 && (imask == 0 || ~mask == imask)) {
3800 /* Note that we rotate the bits to be inserted to the lsb, not to
3801 the position as described in the PoO. */
3802 rot = (rot - pos) & 63;
3807 /* Rotate the input as necessary. */
3808 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3810 /* Insert the selected bits into the output. */
3813 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3815 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3817 } else if (imask == 0) {
3818 tcg_gen_andi_i64(o->out, o->in2, mask);
3820 tcg_gen_andi_i64(o->in2, o->in2, mask);
3821 tcg_gen_andi_i64(o->out, o->out, imask);
3822 tcg_gen_or_i64(o->out, o->out, o->in2);
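/* A worked example (operands arbitrary): RISBG r1,r2,48,63,16
   rotates r2 left by 16 and inserts bits 48-63 of the rotated value
   (the low 16 bits) into the same positions of r1.  mask covers bits
   15..0, imask == ~mask preserves the rest of r1, and the deposit
   fast path applies with pos = 0 and len = 16.  */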
3827 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3829 int i3 = get_field(s->fields, i3);
3830 int i4 = get_field(s->fields, i4);
3831 int i5 = get_field(s->fields, i5);
3834 /* If this is a test-only form, arrange to discard the result. */
3836 o->out = tcg_temp_new_i64();
3844 /* MASK is the set of bits to be operated on from R2, taking care of
3845 I3/I4 wraparound. */
3848 mask ^= ~0ull >> i4 >> 1;
3850 mask |= ~(~0ull >> i4 >> 1);
3853 /* Rotate the input as necessary. */
3854 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3857 switch (s->fields->op2) {
3858 case 0x55: /* AND */
3859 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3860 tcg_gen_and_i64(o->out, o->out, o->in2);
3863 tcg_gen_andi_i64(o->in2, o->in2, mask);
3864 tcg_gen_or_i64(o->out, o->out, o->in2);
3866 case 0x57: /* XOR */
3867 tcg_gen_andi_i64(o->in2, o->in2, mask);
3868 tcg_gen_xor_i64(o->out, o->out, o->in2);
3875 tcg_gen_andi_i64(cc_dst, o->out, mask);
3876 set_cc_nz_u64(s, cc_dst);
3880 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3882 tcg_gen_bswap16_i64(o->out, o->in2);
3886 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3888 tcg_gen_bswap32_i64(o->out, o->in2);
3892 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3894 tcg_gen_bswap64_i64(o->out, o->in2);
3898 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3900 TCGv_i32 t1 = tcg_temp_new_i32();
3901 TCGv_i32 t2 = tcg_temp_new_i32();
3902 TCGv_i32 to = tcg_temp_new_i32();
3903 tcg_gen_extrl_i64_i32(t1, o->in1);
3904 tcg_gen_extrl_i64_i32(t2, o->in2);
3905 tcg_gen_rotl_i32(to, t1, t2);
3906 tcg_gen_extu_i32_i64(o->out, to);
3907 tcg_temp_free_i32(t1);
3908 tcg_temp_free_i32(t2);
3909 tcg_temp_free_i32(to);
3913 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3915 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3919 #ifndef CONFIG_USER_ONLY
3920 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3922 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3927 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3929 gen_helper_sacf(cpu_env, o->in2);
3930 /* Addressing mode has changed, so end the block. */
3931 return DISAS_PC_STALE;
3935 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3937 int sam = s->insn->data;
3953 /* Bizarre but true, we check the address of the current insn for the
3954 specification exception, not the next to be executed. Thus the PoO
3955 documents that Bad Things Happen two bytes before the end. */
3956 if (s->base.pc_next & ~mask) {
3957 gen_program_exception(s, PGM_SPECIFICATION);
3958 return DISAS_NORETURN;
3962 tsam = tcg_const_i64(sam);
3963 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3964 tcg_temp_free_i64(tsam);
3966 /* Always exit the TB, since we (may have) changed execution mode. */
3967 return DISAS_PC_STALE;
3970 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3972 int r1 = get_field(s->fields, r1);
3973 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3977 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3979 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3983 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
3985 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3989 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
3991 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3992 return_low128(o->out2);
3996 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
3998 gen_helper_sqeb(o->out, cpu_env, o->in2);
4002 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
4004 gen_helper_sqdb(o->out, cpu_env, o->in2);
4008 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
4010 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
4011 return_low128(o->out2);
4015 #ifndef CONFIG_USER_ONLY
4016 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
4018 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
4023 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
4025 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4026 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4027 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4029 tcg_temp_free_i32(r1);
4030 tcg_temp_free_i32(r3);
4035 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
4042 disas_jcc(s, &c, get_field(s->fields, m3));
4044 /* We want to store when the condition is fulfilled, so branch
4045 out when it's not. */
4046 c.cond = tcg_invert_cond(c.cond);
4048 lab = gen_new_label();
4050 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
4052 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
4056 r1 = get_field(s->fields, r1);
4057 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
4058 switch (s->insn->data) {
4060 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
4063 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
4065 case 2: /* STOCFH */
4066 h = tcg_temp_new_i64();
4067 tcg_gen_shri_i64(h, regs[r1], 32);
4068 tcg_gen_qemu_st32(h, a, get_mem_index(s));
4069 tcg_temp_free_i64(h);
4072 g_assert_not_reached();
4074 tcg_temp_free_i64(a);
4080 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
4082 uint64_t sign = 1ull << s->insn->data;
4083 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
4084 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
4085 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4086 /* The arithmetic left shift is curious in that it does not affect
4087 the sign bit. Copy that over from the source unchanged. */
4088 tcg_gen_andi_i64(o->out, o->out, ~sign);
4089 tcg_gen_andi_i64(o->in1, o->in1, sign);
4090 tcg_gen_or_i64(o->out, o->out, o->in1);
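/* An illustrative 64-bit case of the sign handling above: shifting
   0x4000000000000001 left by one raw-shifts to 0x8000000000000002,
   but the architected result keeps the source sign, so the masking
   rebuilds 0x0000000000000002 and CC_OP_SLA_64 is left to report
   the overflow.  */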
4094 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4096 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4100 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4102 tcg_gen_sar_i64(o->out, o->in1, o->in2);
4106 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4108 tcg_gen_shr_i64(o->out, o->in1, o->in2);
4112 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4114 gen_helper_sfpc(cpu_env, o->in2);
4118 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4120 gen_helper_sfas(cpu_env, o->in2);
4124 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4126 /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4127 tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4128 gen_helper_srnm(cpu_env, o->addr1);
4132 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4134 /* Bits 0-55 are ignored. */
4135 tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4136 gen_helper_srnm(cpu_env, o->addr1);
4140 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4142 TCGv_i64 tmp = tcg_temp_new_i64();
4144 /* Bits other than 61-63 are ignored. */
4145 tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4147 /* No need to call a helper: we don't implement DFP. */
4148 tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4149 tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4150 tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4152 tcg_temp_free_i64(tmp);
4156 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4158 tcg_gen_extrl_i64_i32(cc_op, o->in1);
4159 tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4162 tcg_gen_shri_i64(o->in1, o->in1, 24);
4163 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4167 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4169 int b1 = get_field(s->fields, b1);
4170 int d1 = get_field(s->fields, d1);
4171 int b2 = get_field(s->fields, b2);
4172 int d2 = get_field(s->fields, d2);
4173 int r3 = get_field(s->fields, r3);
4174 TCGv_i64 tmp = tcg_temp_new_i64();
4176 /* fetch all operands first */
4177 o->in1 = tcg_temp_new_i64();
4178 tcg_gen_addi_i64(o->in1, regs[b1], d1);
4179 o->in2 = tcg_temp_new_i64();
4180 tcg_gen_addi_i64(o->in2, regs[b2], d2);
4181 o->addr1 = get_address(s, 0, r3, 0);
4183 /* load the third operand into r3 before modifying anything */
4184 tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4186 /* subtract CPU timer from first operand and store in GR0 */
4187 gen_helper_stpt(tmp, cpu_env);
4188 tcg_gen_sub_i64(regs[0], o->in1, tmp);
4190 /* store second operand in GR1 */
4191 tcg_gen_mov_i64(regs[1], o->in2);
4193 tcg_temp_free_i64(tmp);
4197 #ifndef CONFIG_USER_ONLY
4198 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4200 tcg_gen_shri_i64(o->in2, o->in2, 4);
4201 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4205 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4207 gen_helper_sske(cpu_env, o->in1, o->in2);
4211 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4213 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4214 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4215 return DISAS_PC_STALE_NOCHAIN;
4218 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4220 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4225 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4227 gen_helper_stck(o->out, cpu_env);
4228 /* ??? We don't implement clock states. */
4229 gen_op_movi_cc(s, 0);
4233 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4235 TCGv_i64 c1 = tcg_temp_new_i64();
4236 TCGv_i64 c2 = tcg_temp_new_i64();
4237 TCGv_i64 todpr = tcg_temp_new_i64();
4238 gen_helper_stck(c1, cpu_env);
4239 /* 16-bit value stored in a uint32_t (only valid bits set) */
4240 tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4241 /* Shift the 64-bit value into its place as a zero-extended
4242 104-bit value. Note that "bit positions 64-103 are always
4243 non-zero so that they compare differently to STCK"; we set
4244 the least significant bit to 1. */
4245 tcg_gen_shli_i64(c2, c1, 56);
4246 tcg_gen_shri_i64(c1, c1, 8);
4247 tcg_gen_ori_i64(c2, c2, 0x10000);
4248 tcg_gen_or_i64(c2, c2, todpr);
4249 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4250 tcg_gen_addi_i64(o->in2, o->in2, 8);
4251 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4252 tcg_temp_free_i64(c1);
4253 tcg_temp_free_i64(c2);
4254 tcg_temp_free_i64(todpr);
4255 /* ??? We don't implement clock states. */
4256 gen_op_movi_cc(s, 0);
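/* Layout of the 16 bytes stored (descriptive): the first doubleword
   is a zero epoch-index byte followed by the top 56 clock bits
   (c1 >> 8); the second starts with the low clock byte (c1 << 56),
   keeps bit positions 64-103 non-zero via the 0x10000, and ends with
   the TOD programmable field in its low 16 bits.  */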
4260 #ifndef CONFIG_USER_ONLY
4261 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4263 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
4264 gen_helper_sck(cc_op, cpu_env, o->in1);
4269 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4271 gen_helper_sckc(cpu_env, o->in2);
4275 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4277 gen_helper_sckpf(cpu_env, regs[0]);
4281 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4283 gen_helper_stckc(o->out, cpu_env);
4287 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4289 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4290 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4291 gen_helper_stctg(cpu_env, r1, o->in2, r3);
4292 tcg_temp_free_i32(r1);
4293 tcg_temp_free_i32(r3);
4297 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4299 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4300 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4301 gen_helper_stctl(cpu_env, r1, o->in2, r3);
4302 tcg_temp_free_i32(r1);
4303 tcg_temp_free_i32(r3);
4307 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4309 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4313 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4315 gen_helper_spt(cpu_env, o->in2);
4319 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4321 gen_helper_stfl(cpu_env);
4325 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4327 gen_helper_stpt(o->out, cpu_env);
4331 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4333 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4338 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4340 gen_helper_spx(cpu_env, o->in2);
4344 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4346 gen_helper_xsch(cpu_env, regs[1]);
4351 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4353 gen_helper_csch(cpu_env, regs[1]);
4358 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4360 gen_helper_hsch(cpu_env, regs[1]);
4365 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4367 gen_helper_msch(cpu_env, regs[1], o->in2);
4372 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4374 gen_helper_rchp(cpu_env, regs[1]);
4379 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4381 gen_helper_rsch(cpu_env, regs[1]);
4386 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4388 gen_helper_sal(cpu_env, regs[1]);
4392 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4394 gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4398 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4400 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4401 gen_op_movi_cc(s, 3);
4405 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4407 /* The instruction is suppressed if not provided. */
4411 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4413 gen_helper_ssch(cpu_env, regs[1], o->in2);
4418 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4420 gen_helper_stsch(cpu_env, regs[1], o->in2);
4425 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4427 gen_helper_stcrw(cpu_env, o->in2);
4432 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4434 gen_helper_tpi(cc_op, cpu_env, o->addr1);
4439 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4441 gen_helper_tsch(cpu_env, regs[1], o->in2);
4446 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4448 gen_helper_chsc(cpu_env, o->in2);
4453 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4455 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4456 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4460 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4462 uint64_t i2 = get_field(s->fields, i2);
4465 /* It is important to do what the instruction name says: STORE THEN.
4466 If we let the output hook perform the store, a fault and restart
4467 would leave the wrong SYSTEM MASK in place. */
4468 t = tcg_temp_new_i64();
4469 tcg_gen_shri_i64(t, psw_mask, 56);
4470 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4471 tcg_temp_free_i64(t);
4473 if (s->fields->op == 0xac) {
4474 tcg_gen_andi_i64(psw_mask, psw_mask,
4475 (i2 << 56) | 0x00ffffffffffffffull);
4477 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4480 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4481 return DISAS_PC_STALE_NOCHAIN;
4484 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4486 gen_helper_stura(cpu_env, o->in2, o->in1);
4490 static DisasJumpType op_sturg(DisasContext *s, DisasOps *o)
4492 gen_helper_sturg(cpu_env, o->in2, o->in1);
4497 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4499 gen_helper_stfle(cc_op, cpu_env, o->in2);
4504 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4506 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4510 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4512 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4516 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4518 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4522 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4524 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4528 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4530 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4531 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4532 gen_helper_stam(cpu_env, r1, o->in2, r3);
4533 tcg_temp_free_i32(r1);
4534 tcg_temp_free_i32(r3);
4538 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4540 int m3 = get_field(s->fields, m3);
4541 int pos, base = s->insn->data;
4542 TCGv_i64 tmp = tcg_temp_new_i64();
4544 pos = base + ctz32(m3) * 8;
4547 /* Effectively a 32-bit store. */
4548 tcg_gen_shri_i64(tmp, o->in1, pos);
4549 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4555 /* Effectively a 16-bit store. */
4556 tcg_gen_shri_i64(tmp, o->in1, pos);
4557 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4564 /* Effectively an 8-bit store. */
4565 tcg_gen_shri_i64(tmp, o->in1, pos);
4566 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4570 /* This is going to be a sequence of shifts and stores. */
4571 pos = base + 32 - 8;
4574 tcg_gen_shri_i64(tmp, o->in1, pos);
4575 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4576 tcg_gen_addi_i64(o->in2, o->in2, 1);
4578 m3 = (m3 << 1) & 0xf;
4583 tcg_temp_free_i64(tmp);
4587 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4589 int r1 = get_field(s->fields, r1);
4590 int r3 = get_field(s->fields, r3);
4591 int size = s->insn->data;
4592 TCGv_i64 tsize = tcg_const_i64(size);
4596 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4598 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4603 tcg_gen_add_i64(o->in2, o->in2, tsize);
4607 tcg_temp_free_i64(tsize);
4611 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4613 int r1 = get_field(s->fields, r1);
4614 int r3 = get_field(s->fields, r3);
4615 TCGv_i64 t = tcg_temp_new_i64();
4616 TCGv_i64 t4 = tcg_const_i64(4);
4617 TCGv_i64 t32 = tcg_const_i64(32);
4620 tcg_gen_shl_i64(t, regs[r1], t32);
4621 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4625 tcg_gen_add_i64(o->in2, o->in2, t4);
4629 tcg_temp_free_i64(t);
4630 tcg_temp_free_i64(t4);
4631 tcg_temp_free_i64(t32);
4635 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4637 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4638 gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4639 } else if (HAVE_ATOMIC128) {
4640 gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4642 gen_helper_exit_atomic(cpu_env);
4643 return DISAS_NORETURN;
4648 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4650 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4651 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4653 gen_helper_srst(cpu_env, r1, r2);
4655 tcg_temp_free_i32(r1);
4656 tcg_temp_free_i32(r2);
4661 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4663 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4664 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4666 gen_helper_srstu(cpu_env, r1, r2);
4668 tcg_temp_free_i32(r1);
4669 tcg_temp_free_i32(r2);
4674 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4676 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4680 static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
4685 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4687 /* The !borrow flag is the msb of CC. Since we want the inverse of
4688 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4689 disas_jcc(s, &cmp, 8 | 4);
4690 borrow = tcg_temp_new_i64();
4692 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4694 TCGv_i32 t = tcg_temp_new_i32();
4695 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4696 tcg_gen_extu_i32_i64(borrow, t);
4697 tcg_temp_free_i32(t);
4701 tcg_gen_sub_i64(o->out, o->out, borrow);
4702 tcg_temp_free_i64(borrow);
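/* For SUBTRACT LOGICAL the high bit of the CC (values 2 and 3) means
   "no borrow", so asking disas_jcc for mask 8 | 4 (CC 0 or CC 1)
   materializes the borrow itself as a 0/1 value, which is then
   subtracted from the difference computed above.  */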
4706 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4713 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4714 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4715 tcg_temp_free_i32(t);
4717 t = tcg_const_i32(s->ilen);
4718 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4719 tcg_temp_free_i32(t);
4721 gen_exception(EXCP_SVC);
4722 return DISAS_NORETURN;
4725 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4729 cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4730 cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4731 gen_op_movi_cc(s, cc);
4735 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4737 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4742 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4744 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4749 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4751 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4756 #ifndef CONFIG_USER_ONLY
4758 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4760 gen_helper_testblock(cc_op, cpu_env, o->in2);
4765 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4767 gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4774 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4776 TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4777 gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4778 tcg_temp_free_i32(l1);
4783 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4785 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4786 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4787 tcg_temp_free_i32(l);
4792 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4794 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4795 return_low128(o->out2);
4800 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4802 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4803 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4804 tcg_temp_free_i32(l);
4809 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4811 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4812 gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4813 tcg_temp_free_i32(l);
4818 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4820 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4821 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4822 TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4823 TCGv_i32 tst = tcg_temp_new_i32();
4824 int m3 = get_field(s->fields, m3);
4826 if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4830 tcg_gen_movi_i32(tst, -1);
4832 tcg_gen_extrl_i64_i32(tst, regs[0]);
4833 if (s->insn->opc & 3) {
4834 tcg_gen_ext8u_i32(tst, tst);
4836 tcg_gen_ext16u_i32(tst, tst);
4839 gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4841 tcg_temp_free_i32(r1);
4842 tcg_temp_free_i32(r2);
4843 tcg_temp_free_i32(sizes);
4844 tcg_temp_free_i32(tst);
4849 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4851 TCGv_i32 t1 = tcg_const_i32(0xff);
4852 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4853 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4854 tcg_temp_free_i32(t1);
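/* TEST AND SET reduces to an atomic exchange with 0xff: the old byte
   comes back in t1 and its leftmost bit (bit 7) becomes the CC,
   which is therefore 0 or 1 as architected.  */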
4859 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4861 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4862 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4863 tcg_temp_free_i32(l);
4867 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4869 int l1 = get_field(s->fields, l1) + 1;
4872 /* The length must not exceed 32 bytes. */
4874 gen_program_exception(s, PGM_SPECIFICATION);
4875 return DISAS_NORETURN;
4877 l = tcg_const_i32(l1);
4878 gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4879 tcg_temp_free_i32(l);
4884 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4886 int l1 = get_field(s->fields, l1) + 1;
4889 /* The length must be even and must not exceed 64 bytes. */
4890 if ((l1 & 1) || (l1 > 64)) {
4891 gen_program_exception(s, PGM_SPECIFICATION);
4892 return DISAS_NORETURN;
4894 l = tcg_const_i32(l1);
4895 gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4896 tcg_temp_free_i32(l);
4902 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4904 int d1 = get_field(s->fields, d1);
4905 int d2 = get_field(s->fields, d2);
4906 int b1 = get_field(s->fields, b1);
4907 int b2 = get_field(s->fields, b2);
4908 int l = get_field(s->fields, l1);
4911 o->addr1 = get_address(s, 0, b1, d1);
4913 /* If the addresses are identical, this is a store/memset of zero. */
4914 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4915 o->in2 = tcg_const_i64(0);
4919 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4922 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4926 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4929 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4933 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4936 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4940 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4942 gen_op_movi_cc(s, 0);
4946 /* But in general we'll defer to a helper. */
4947 o->in2 = get_address(s, 0, b2, d2);
4948 t32 = tcg_const_i32(l);
4949 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4950 tcg_temp_free_i32(t32);
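/* The inline path above is worth the trouble: XC with both operands
   identical is the classic s390 idiom for clearing storage, so
   compilers emit it constantly, and for lengths up to 32 bytes the
   8/4/2/1-byte store sequence avoids a helper call on a very hot
   pattern.  */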
4955 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4957 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4961 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4963 int shift = s->insn->data & 0xff;
4964 int size = s->insn->data >> 8;
4965 uint64_t mask = ((1ull << size) - 1) << shift;
4968 tcg_gen_shli_i64(o->in2, o->in2, shift);
4969 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4971 /* Produce the CC from only the bits manipulated. */
4972 tcg_gen_andi_i64(cc_dst, o->out, mask);
4973 set_cc_nz_u64(s, cc_dst);
4977 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4979 o->in1 = tcg_temp_new_i64();
4981 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4982 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4984 /* Perform the atomic operation in memory. */
4985 tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
4989 /* Recompute also for the atomic case; needed for setting CC. */
4990 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4992 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4993 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
4998 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
5000 o->out = tcg_const_i64(0);
5004 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
5006 o->out = tcg_const_i64(0);
5012 #ifndef CONFIG_USER_ONLY
5013 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
5015 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5017 gen_helper_clp(cpu_env, r2);
5018 tcg_temp_free_i32(r2);
5023 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
5025 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5026 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5028 gen_helper_pcilg(cpu_env, r1, r2);
5029 tcg_temp_free_i32(r1);
5030 tcg_temp_free_i32(r2);
5035 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
5037 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5038 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5040 gen_helper_pcistg(cpu_env, r1, r2);
5041 tcg_temp_free_i32(r1);
5042 tcg_temp_free_i32(r2);
5047 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
5049 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5050 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5052 gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
5053 tcg_temp_free_i32(ar);
5054 tcg_temp_free_i32(r1);
5059 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
5061 gen_helper_sic(cpu_env, o->in1, o->in2);
5065 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
5067 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5068 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5070 gen_helper_rpcit(cpu_env, r1, r2);
5071 tcg_temp_free_i32(r1);
5072 tcg_temp_free_i32(r2);
5077 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
5079 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5080 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
5081 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5083 gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
5084 tcg_temp_free_i32(ar);
5085 tcg_temp_free_i32(r1);
5086 tcg_temp_free_i32(r3);
5091 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5093 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5094 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5096 gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
5097 tcg_temp_free_i32(ar);
5098 tcg_temp_free_i32(r1);
5104 /* ====================================================================== */
5105 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5106 the original inputs), update the various cc data structures in order to
5107 be able to compute the new condition code. */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
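/*
 * Illustrative note on the pattern above: a "cout" helper does not
 * compute the condition code itself.  It only records the operands
 * under a CC_OP_* tag (via the gen_op_update*_cc_i64 helpers); the CC
 * value is materialized lazily, e.g. only when a later BRANCH ON
 * CONDITION forces the dynamic cc_op to be evaluated.
 */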
/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = load_freg(get_field(f, r1));
    o->out2 = load_freg(get_field(f, r1) + 2);
}
#define SPEC_prep_x1 SPEC_r1_f128
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */
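/*
 * Illustrative composition (not tied to any one table entry): for a
 * 32-bit add like AR, "prep_new" above allocates a fresh temporary for
 * the sum, and "wout_r1_32" below then deposits only the low 32 bits
 * into r1, leaving the high word of the register intact.
 */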
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even
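/*
 * Note on wout_r1_D32 above: a 64-bit result is split across the
 * even/odd register pair -- the low word is stored to r1 + 1 first,
 * then the value is shifted right so the high word can be stored to
 * r1.  SPEC_r1_even enforces the even-register requirement for such
 * pairs.
 */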
static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0
#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif
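/*
 * Note (illustrative): the "_a" store variants above exist only for
 * softmmu builds and add MO_ALIGN, since the instructions that use them
 * require naturally aligned operands; an unaligned access is expected
 * to fault rather than be emulated byte-wise.
 */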
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0

/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0
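/*
 * Note: the "_sr32" loaders here and below pick up the *high* word of a
 * register (shift right by 32), which is how the high-word facility
 * instructions address their operands.
 */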
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg(get_field(f, r1));
}
#define SPEC_in1_f1 0

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg(get_field(f, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg(get_field(f, r3));
}
#define SPEC_in1_f3 0

static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg(get_field(f, r2));
}
#define SPEC_in2_f2 0

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg(get_field(f, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0
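/*
 * Note: relative-immediate (RI) operands are halfword-scaled, which is
 * why in2_ri2 computes pc_next + 2 * i2; a LARL-style instruction with
 * a signed 32-bit i2 thus reaches +/- 4GiB around the current insn.
 */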
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */
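/*
 * Illustrative expansion (one representative entry): an insn-data.def
 * line such as
 *     C(0x1a00, AR, RR_a, Z, r1, r2_32s, new, r1_32, add, adds32)
 * is first expanded into an enum constant (insn_AR), then into an
 * insn_info[] initializer that wires in1_r1, in2_r2_32s, prep_new,
 * wout_r1_32, op_add and cout_adds32 together, and finally into a
 * "case 0x1a00:" in lookup_opc() below.
 */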
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};
#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},
/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
/* Give smaller names to the various facilities.  */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V           S390_FEAT_VECTOR /* vector facility */
static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->beg == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
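/*
 * Worked example for the dl+dh case above (illustrative): a 20-bit long
 * displacement is encoded as DL (12 bits) followed by DH (8 bits), so
 * the zero-extract yields r = (DL << 8) | DH.  ((int8_t)r << 12)
 * sign-extends DH and moves it above DL, while (r >> 8) recovers DL,
 * reassembling the signed displacement DH:DL.
 */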
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
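    /*
     * Illustrative examples for the cases below: with the insn
     * left-aligned in 64 bits, a 0xb2xx opcode keeps its secondary
     * opcode in bits 8-15, so (insn << 8) >> 56 isolates it; the
     * RIL/SSF families carry only a 4-bit op2 in bits 12-15 (after the
     * r1 field), hence (insn << 12) >> 60.
     */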
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
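/*
 * Background for the check below (illustrative): without the
 * additional-floating-point (AFP) facility only f0, f2, f4 and f6
 * exist, so any odd-numbered FP register, or one above 6, is an AFP
 * register -- hence the "reg % 2 || reg > 6" test.
 */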
static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}

static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return !(reg & 0x2);
}
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasFields f;
    DisasOps o = {};

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return DISAS_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            return DISAS_NORETURN;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(&f, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(&f, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(&f, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (dxc) {
                gen_data_exception(dxc);
                return DISAS_NORETURN;
            }
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(&f, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(&f, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(&f, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(&f, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(&f, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
    }
    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &f, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers.  */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
    dc->do_debug = dc->base.singlestep_enabled;
}
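/*
 * Note (illustrative): the pending EXECUTE target is passed in via
 * tb->cs_base (as ex_value), so a TB translated on behalf of EXECUTE
 * is keyed differently from an ordinary TB at the same address.
 */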
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}
static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = DISAS_PC_STALE;
    dc->do_debug = true;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared -- thus we increment the PC here so that the logic
       setting tb->size does the right thing.  */
    dc->base.pc_next += 2;
    return true;
}
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        uint64_t page_start;

        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
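/*
 * Note: translation also stops once the next insn would leave the first
 * guest page (or immediately after an EXECUTE target), so that per-page
 * TB invalidation keeps working.
 */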
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(dc) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately log_target_disas can't use host memory.  */
        qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
    } else {
        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
    }
}
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .breakpoint_check   = s390x_tr_breakpoint_check,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb);
}
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];
    env->psw.addr = data[0];
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}