/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. the next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool do_debug;
};
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
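/*
 * The link information produced above follows the addressing mode: the
 * full address in 64-bit mode, the address with the leftmost bit set in
 * 31-bit mode, and the plain address in the low 32 bits in 24-bit mode;
 * in the latter two cases the upper register half is preserved.
 */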
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;

        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
static int get_mem_index(DisasContext *s)
{
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
}
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
            (tb_cflags(s->base.tb) & CF_LAST_IO) ||
            (s->base.tb->flags & FLAG_MASK_PER);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
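/*
 * goto_tb chains two TBs directly, which is only valid while both remain
 * mapped the same way; restricting it to destinations on the same guest
 * page as the TB itself (or as the current PC) matches the granularity at
 * which cached translations are invalidated.
 */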
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
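/*
 * Example: branch mask 12 (8|4 = CC0|CC1) after a signed comparison means
 * "equal or low", i.e. op1 <= op2, and indeed ltgt_cond[12] == TCG_COND_LE.
 */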
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
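/*
 * Example: for logical operations CC0 means a zero and CC1 a nonzero
 * result, so mask 4 (CC1 only) maps to nz_cond[4] == TCG_COND_NE.
 */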
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }
    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;
    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
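/*
 * Worked example: "branch on not equal" is BRC with mask 7 (CC1|CC2|CC3).
 * After a signed comparison (CC_OP_LTGT_*), CC3 cannot occur, so
 * ltgt_cond[7] folds the test into a single TCG_COND_NE comparison of the
 * original operands and no condition code is ever materialized.
 */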
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}
/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
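/*
 * For example, an instruction operating on even/odd register pairs in r1
 * and r3 (such as CDSG) would carry SPEC_r1_even | SPEC_r3_even, and a
 * specification exception is raised before any operand helper runs if
 * either register number is odd.
 */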
/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4
/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return DISAS_NEXT;
}
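/*
 * A logical (unsigned) add sets CC 0/1 for zero/nonzero results without
 * carry and CC 2/3 for zero/nonzero results with carry; carry is thus
 * exactly "CC >= 2", the two CCs selected by branch mask 3 above.
 */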
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
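/*
 * In 24-bit mode the low word assembled above contains: the instruction
 * length code in bits 30-31, the condition code in bits 28-29, the program
 * mask in bits 24-27, and the return address in bits 0-23.
 */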
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s->fields, i2));
}

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s->fields, m3);
    uint8_t m4 = get_field(s->fields, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
}
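/*
 * The packed value is m3 | (m4 << 4), so e.g. an instruction specifying
 * rounding mode m3 = 5 (round toward zero) with m4 = 0 passes 0x05 to the
 * helper.
 */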
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);
    return DISAS_NEXT;
}

static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}
static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
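/*
 * COMPARE AND SWAP sets CC 0 when the first operand equalled the memory
 * operand (swap performed) and CC 1 when it did not (first operand
 * replaced by the memory value), hence the NE setcond above computes the
 * CC directly.
 */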
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;
}

static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    set_cc_static(s);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
#endif
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}

static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);

    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}

static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return DISAS_PC_CC_UPDATED;
}
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
    return_low128(o->out2);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
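/*
 * Example: FLOGR on 0x0000000000000001 yields R1 = 63 and R1+1 = 0; on an
 * input of 0 it yields R1 = 64 and R1+1 = 0, the CC distinguishing the
 * zero-input case.
 */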
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
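/*
 * Example: ICM with mask 0b1100 performs one 16-bit load and deposits it
 * at pos = ctz32(0xc) * 8 = 16, i.e. into the two leftmost bytes of the
 * low word, and ccm then covers exactly those 16 inserted bits for the
 * CC computation.
 */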
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;

    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}

static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}

static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}

static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
#endif
static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
    int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
    int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMCTR:
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    }

    t_r1 = tcg_const_i32(r1);
    t_r2 = tcg_const_i32(r2);
    t_r3 = tcg_const_i32(r3);
    type = tcg_const_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r2);
    tcg_temp_free_i32(t_r3);
    tcg_temp_free_i32(type);
    return DISAS_NEXT;
}
2750 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2752 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2757 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2759 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2764 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2766 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2771 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
/* The real output is indeed the original value in memory. */
2775 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2776 s->insn->data | MO_ALIGN);
2777 /* However, we need to recompute the addition for setting CC. */
2778 tcg_gen_add_i64(o->out, o->in1, o->in2);
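/* For illustration: LAA r1,r3,d2(b2) atomically adds r3 into the
   second operand; r1 receives the pre-update memory value, while the
   re-done addition only feeds the signed-addition CC.  */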
2782 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
/* The real output is indeed the original value in memory. */
2786 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2787 s->insn->data | MO_ALIGN);
2788 /* However, we need to recompute the operation for setting CC. */
2789 tcg_gen_and_i64(o->out, o->in1, o->in2);
2793 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
/* The real output is indeed the original value in memory. */
2797 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2798 s->insn->data | MO_ALIGN);
2799 /* However, we need to recompute the operation for setting CC. */
2800 tcg_gen_or_i64(o->out, o->in1, o->in2);
2804 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
/* The real output is indeed the original value in memory. */
2808 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2809 s->insn->data | MO_ALIGN);
2810 /* However, we need to recompute the operation for setting CC. */
2811 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2815 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2817 gen_helper_ldeb(o->out, cpu_env, o->in2);
2821 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2823 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2826 return DISAS_NORETURN;
2828 gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2829 tcg_temp_free_i32(m34);
2833 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2835 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2838 return DISAS_NORETURN;
2840 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
2841 tcg_temp_free_i32(m34);
2845 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2847 TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2850 return DISAS_NORETURN;
2852 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
2853 tcg_temp_free_i32(m34);
2857 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2859 gen_helper_lxdb(o->out, cpu_env, o->in2);
2860 return_low128(o->out2);
2864 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2866 gen_helper_lxeb(o->out, cpu_env, o->in2);
2867 return_low128(o->out2);
2871 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2873 tcg_gen_shli_i64(o->out, o->in2, 32);
2877 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2879 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2883 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2885 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2889 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2891 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2895 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2897 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2901 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2903 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2907 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2909 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2913 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2915 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2919 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2921 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2925 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2927 TCGLabel *lab = gen_new_label();
2928 store_reg32_i64(get_field(s->fields, r1), o->in2);
2929 /* The value is stored even in case of trap. */
2930 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2936 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2938 TCGLabel *lab = gen_new_label();
2939 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2940 /* The value is stored even in case of trap. */
2941 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2947 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2949 TCGLabel *lab = gen_new_label();
2950 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2951 /* The value is stored even in case of trap. */
2952 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2958 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2960 TCGLabel *lab = gen_new_label();
2961 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2962 /* The value is stored even in case of trap. */
2963 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2969 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2971 TCGLabel *lab = gen_new_label();
2972 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2973 /* The value is stored even in case of trap. */
2974 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2980 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2984 disas_jcc(s, &c, get_field(s->fields, m3));
2987 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2991 TCGv_i32 t32 = tcg_temp_new_i32();
2994 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2997 t = tcg_temp_new_i64();
2998 tcg_gen_extu_i32_i64(t, t32);
2999 tcg_temp_free_i32(t32);
3001 z = tcg_const_i64(0);
3002 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
3003 tcg_temp_free_i64(t);
3004 tcg_temp_free_i64(z);
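/* For illustration: for LOAD ON CONDITION the movcond sequences above
   select o->in2 (the newly loaded value) when the m3 condition holds
   and keep o->in1 (the current register contents) otherwise, so no
   conditional branch is emitted.  */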
3010 #ifndef CONFIG_USER_ONLY
3011 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
3013 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3014 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3015 gen_helper_lctl(cpu_env, r1, o->in2, r3);
3016 tcg_temp_free_i32(r1);
3017 tcg_temp_free_i32(r3);
3018 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3019 return DISAS_PC_STALE_NOCHAIN;
3022 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3024 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3025 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3026 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3027 tcg_temp_free_i32(r1);
3028 tcg_temp_free_i32(r3);
3029 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3030 return DISAS_PC_STALE_NOCHAIN;
3033 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3035 gen_helper_lra(o->out, cpu_env, o->in2);
3040 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3042 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3046 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3050 per_breaking_event(s);
3052 t1 = tcg_temp_new_i64();
3053 t2 = tcg_temp_new_i64();
3054 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3055 MO_TEUL | MO_ALIGN_8);
3056 tcg_gen_addi_i64(o->in2, o->in2, 4);
3057 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3058 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
3059 tcg_gen_shli_i64(t1, t1, 32);
3060 gen_helper_load_psw(cpu_env, t1, t2);
3061 tcg_temp_free_i64(t1);
3062 tcg_temp_free_i64(t2);
3063 return DISAS_NORETURN;
3066 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3070 per_breaking_event(s);
3072 t1 = tcg_temp_new_i64();
3073 t2 = tcg_temp_new_i64();
3074 tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3075 MO_TEQ | MO_ALIGN_8);
3076 tcg_gen_addi_i64(o->in2, o->in2, 8);
3077 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3078 gen_helper_load_psw(cpu_env, t1, t2);
3079 tcg_temp_free_i64(t1);
3080 tcg_temp_free_i64(t2);
3081 return DISAS_NORETURN;
3085 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3087 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3088 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3089 gen_helper_lam(cpu_env, r1, o->in2, r3);
3090 tcg_temp_free_i32(r1);
3091 tcg_temp_free_i32(r3);
3095 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3097 int r1 = get_field(s->fields, r1);
3098 int r3 = get_field(s->fields, r3);
3101 /* Only one register to read. */
3102 t1 = tcg_temp_new_i64();
3103 if (unlikely(r1 == r3)) {
3104 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3105 store_reg32_i64(r1, t1);
3110 /* First load the values of the first and last registers to trigger
3111 possible page faults. */
3112 t2 = tcg_temp_new_i64();
3113 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3114 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3115 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3116 store_reg32_i64(r1, t1);
3117 store_reg32_i64(r3, t2);
3119 /* Only two registers to read. */
3120 if (((r1 + 1) & 15) == r3) {
/* Then load the remaining registers; a page fault can't occur here. */
3128 tcg_gen_movi_i64(t2, 4);
3131 tcg_gen_add_i64(o->in2, o->in2, t2);
3132 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3133 store_reg32_i64(r1, t1);
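/* For illustration: "lm %r2,%r5,0(%r6)" loads the first and last words
   up front, so a fault on either page leaves r2-r5 unmodified; the
   loop above then fills the middle registers without faulting.  */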
3141 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3143 int r1 = get_field(s->fields, r1);
3144 int r3 = get_field(s->fields, r3);
3147 /* Only one register to read. */
3148 t1 = tcg_temp_new_i64();
3149 if (unlikely(r1 == r3)) {
3150 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3151 store_reg32h_i64(r1, t1);
3156 /* First load the values of the first and last registers to trigger
3157 possible page faults. */
3158 t2 = tcg_temp_new_i64();
3159 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3160 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3161 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3162 store_reg32h_i64(r1, t1);
3163 store_reg32h_i64(r3, t2);
3165 /* Only two registers to read. */
3166 if (((r1 + 1) & 15) == r3) {
/* Then load the remaining registers; a page fault can't occur here. */
3174 tcg_gen_movi_i64(t2, 4);
3177 tcg_gen_add_i64(o->in2, o->in2, t2);
3178 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3179 store_reg32h_i64(r1, t1);
3187 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3189 int r1 = get_field(s->fields, r1);
3190 int r3 = get_field(s->fields, r3);
3193 /* Only one register to read. */
3194 if (unlikely(r1 == r3)) {
3195 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3199 /* First load the values of the first and last registers to trigger
3200 possible page faults. */
3201 t1 = tcg_temp_new_i64();
3202 t2 = tcg_temp_new_i64();
3203 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3204 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3205 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3206 tcg_gen_mov_i64(regs[r1], t1);
3209 /* Only two registers to read. */
3210 if (((r1 + 1) & 15) == r3) {
/* Then load the remaining registers; a page fault can't occur here. */
3217 tcg_gen_movi_i64(t1, 8);
3220 tcg_gen_add_i64(o->in2, o->in2, t1);
3221 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3228 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3231 MemOp mop = s->insn->data;
3233 /* In a parallel context, stop the world and single step. */
3234 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3237 gen_exception(EXCP_ATOMIC);
3238 return DISAS_NORETURN;
3241 /* In a serial context, perform the two loads ... */
3242 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
3243 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3244 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3245 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3246 tcg_temp_free_i64(a1);
3247 tcg_temp_free_i64(a2);
3249 /* ... and indicate that we performed them while interlocked. */
3250 gen_op_movi_cc(s, 0);
3254 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3256 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3257 gen_helper_lpq(o->out, cpu_env, o->in2);
3258 } else if (HAVE_ATOMIC128) {
3259 gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3261 gen_helper_exit_atomic(cpu_env);
3262 return DISAS_NORETURN;
3264 return_low128(o->out2);
3268 #ifndef CONFIG_USER_ONLY
3269 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3271 gen_helper_lura(o->out, cpu_env, o->in2);
3275 static DisasJumpType op_lurag(DisasContext *s, DisasOps *o)
3277 gen_helper_lurag(o->out, cpu_env, o->in2);
3282 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3284 tcg_gen_andi_i64(o->out, o->in2, -256);
3288 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3290 const int64_t block_size = (1ull << (get_field(s->fields, m3) + 6));
3292 if (get_field(s->fields, m3) > 6) {
3293 gen_program_exception(s, PGM_SPECIFICATION);
3294 return DISAS_NORETURN;
3297 tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3298 tcg_gen_neg_i64(o->addr1, o->addr1);
3299 tcg_gen_movi_i64(o->out, 16);
3300 tcg_gen_umin_i64(o->out, o->out, o->addr1);
3301 gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
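/* Worked example: with m3 = 0 the block size is 64; for an address
   whose low six bits are 61, the ori/neg pair computes 64 - 61 = 3 and
   o->out becomes min(16, 3) = 3.  On a block boundary the pair yields
   64 and o->out stays 16.  */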
3305 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3308 o->g_out = o->g_in2;
3314 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3316 int b2 = get_field(s->fields, b2);
3317 TCGv ar1 = tcg_temp_new_i64();
3320 o->g_out = o->g_in2;
3324 switch (s->base.tb->flags & FLAG_MASK_ASC) {
3325 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3326 tcg_gen_movi_i64(ar1, 0);
3328 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3329 tcg_gen_movi_i64(ar1, 1);
3331 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3333 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3335 tcg_gen_movi_i64(ar1, 0);
3338 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3339 tcg_gen_movi_i64(ar1, 2);
3343 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3344 tcg_temp_free_i64(ar1);
3349 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3353 o->g_out = o->g_in1;
3354 o->g_out2 = o->g_in2;
3357 o->g_in1 = o->g_in2 = false;
3361 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3363 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3364 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3365 tcg_temp_free_i32(l);
3369 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3371 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3372 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3373 tcg_temp_free_i32(l);
3377 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3379 int r1 = get_field(s->fields, r1);
3380 int r2 = get_field(s->fields, r2);
3383 /* r1 and r2 must be even. */
3384 if (r1 & 1 || r2 & 1) {
3385 gen_program_exception(s, PGM_SPECIFICATION);
3386 return DISAS_NORETURN;
3389 t1 = tcg_const_i32(r1);
3390 t2 = tcg_const_i32(r2);
3391 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3392 tcg_temp_free_i32(t1);
3393 tcg_temp_free_i32(t2);
3398 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3400 int r1 = get_field(s->fields, r1);
3401 int r3 = get_field(s->fields, r3);
3404 /* r1 and r3 must be even. */
3405 if (r1 & 1 || r3 & 1) {
3406 gen_program_exception(s, PGM_SPECIFICATION);
3407 return DISAS_NORETURN;
3410 t1 = tcg_const_i32(r1);
3411 t3 = tcg_const_i32(r3);
3412 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3413 tcg_temp_free_i32(t1);
3414 tcg_temp_free_i32(t3);
3419 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3421 int r1 = get_field(s->fields, r1);
3422 int r3 = get_field(s->fields, r3);
3425 /* r1 and r3 must be even. */
3426 if (r1 & 1 || r3 & 1) {
3427 gen_program_exception(s, PGM_SPECIFICATION);
3428 return DISAS_NORETURN;
3431 t1 = tcg_const_i32(r1);
3432 t3 = tcg_const_i32(r3);
3433 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3434 tcg_temp_free_i32(t1);
3435 tcg_temp_free_i32(t3);
3440 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3442 int r3 = get_field(s->fields, r3);
3443 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3448 #ifndef CONFIG_USER_ONLY
3449 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3451 int r1 = get_field(s->fields, l1);
3452 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3457 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3459 int r1 = get_field(s->fields, l1);
3460 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3466 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3468 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3469 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3470 tcg_temp_free_i32(l);
3474 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3476 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3477 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3478 tcg_temp_free_i32(l);
3482 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3484 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3489 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3491 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3493 return_low128(o->in2);
3497 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3499 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3500 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3501 tcg_temp_free_i32(l);
3505 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3507 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3511 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3513 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3517 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3519 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3523 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3525 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3529 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3531 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3535 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3537 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3538 return_low128(o->out2);
3542 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3544 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3545 return_low128(o->out2);
3549 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3551 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3552 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3553 tcg_temp_free_i64(r3);
3557 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3559 TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3560 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3561 tcg_temp_free_i64(r3);
3565 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3567 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3568 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3569 tcg_temp_free_i64(r3);
3573 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3575 TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3576 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3577 tcg_temp_free_i64(r3);
3581 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3584 z = tcg_const_i64(0);
3585 n = tcg_temp_new_i64();
3586 tcg_gen_neg_i64(n, o->in2);
3587 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3588 tcg_temp_free_i64(n);
3589 tcg_temp_free_i64(z);
3593 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3595 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3599 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3601 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3605 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3607 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3608 tcg_gen_mov_i64(o->out2, o->in2);
3612 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3614 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3615 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3616 tcg_temp_free_i32(l);
3621 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3623 tcg_gen_neg_i64(o->out, o->in2);
3627 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3629 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3633 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3635 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3639 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3641 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3642 tcg_gen_mov_i64(o->out2, o->in2);
3646 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3648 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3649 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3650 tcg_temp_free_i32(l);
3655 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3657 tcg_gen_or_i64(o->out, o->in1, o->in2);
3661 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3663 int shift = s->insn->data & 0xff;
3664 int size = s->insn->data >> 8;
3665 uint64_t mask = ((1ull << size) - 1) << shift;
3668 tcg_gen_shli_i64(o->in2, o->in2, shift);
3669 tcg_gen_or_i64(o->out, o->in1, o->in2);
3671 /* Produce the CC from only the bits manipulated. */
3672 tcg_gen_andi_i64(cc_dst, o->out, mask);
3673 set_cc_nz_u64(s, cc_dst);
3677 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3679 o->in1 = tcg_temp_new_i64();
3681 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3682 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3684 /* Perform the atomic operation in memory. */
3685 tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
/* Recompute the operation even in the atomic case; the result is needed for setting CC. */
3690 tcg_gen_or_i64(o->out, o->in1, o->in2);
3692 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3693 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
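/* For illustration: "oi 0(%r1),0x80" sets the leftmost bit of the
   addressed byte; both paths above leave the updated value in o->out,
   from which the CC is then computed.  */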
3698 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3700 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3701 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3702 tcg_temp_free_i32(l);
3706 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3708 int l2 = get_field(s->fields, l2) + 1;
3711 /* The length must not exceed 32 bytes. */
3713 gen_program_exception(s, PGM_SPECIFICATION);
3714 return DISAS_NORETURN;
3716 l = tcg_const_i32(l2);
3717 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3718 tcg_temp_free_i32(l);
3722 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3724 int l2 = get_field(s->fields, l2) + 1;
/* The length must be even and must not exceed 64 bytes. */
3728 if ((l2 & 1) || (l2 > 64)) {
3729 gen_program_exception(s, PGM_SPECIFICATION);
3730 return DISAS_NORETURN;
3732 l = tcg_const_i32(l2);
3733 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3734 tcg_temp_free_i32(l);
3738 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3740 gen_helper_popcnt(o->out, o->in2);
3744 #ifndef CONFIG_USER_ONLY
3745 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3747 gen_helper_ptlb(cpu_env);
3752 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3754 int i3 = get_field(s->fields, i3);
3755 int i4 = get_field(s->fields, i4);
3756 int i5 = get_field(s->fields, i5);
3757 int do_zero = i4 & 0x80;
3758 uint64_t mask, imask, pmask;
3761 /* Adjust the arguments for the specific insn. */
3762 switch (s->fields->op2) {
3763 case 0x55: /* risbg */
3764 case 0x59: /* risbgn */
3769 case 0x5d: /* risbhg */
3772 pmask = 0xffffffff00000000ull;
3774 case 0x51: /* risblg */
3777 pmask = 0x00000000ffffffffull;
3780 g_assert_not_reached();
/* MASK is the set of bits to be inserted from R2.
   Beware of I3/I4 wraparound. */
3787 mask ^= pmask >> i4 >> 1;
3789 mask |= ~(pmask >> i4 >> 1);
3793 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3794 insns, we need to keep the other half of the register. */
3795 imask = ~mask | ~pmask;
3803 if (s->fields->op2 == 0x5d) {
3807 /* In some cases we can implement this with extract. */
3808 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3809 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3813 /* In some cases we can implement this with deposit. */
3814 if (len > 0 && (imask == 0 || ~mask == imask)) {
3815 /* Note that we rotate the bits to be inserted to the lsb, not to
3816 the position as described in the PoO. */
3817 rot = (rot - pos) & 63;
3822 /* Rotate the input as necessary. */
3823 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3825 /* Insert the selected bits into the output. */
3828 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3830 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3832 } else if (imask == 0) {
3833 tcg_gen_andi_i64(o->out, o->in2, mask);
3835 tcg_gen_andi_i64(o->in2, o->in2, mask);
3836 tcg_gen_andi_i64(o->out, o->out, imask);
3837 tcg_gen_or_i64(o->out, o->out, o->in2);
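/* Worked example: RISBG with i3 = 40, i4 = 0x80 | 47 (zero flag set)
   and i5 = 0 selects PoO bits 40-47, i.e. mask = 0x0000000000ff0000
   with imask = 0; the deposit path above rotates those source bits
   down to the lsb and emits a single deposit_z.  */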
3842 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3844 int i3 = get_field(s->fields, i3);
3845 int i4 = get_field(s->fields, i4);
3846 int i5 = get_field(s->fields, i5);
3849 /* If this is a test-only form, arrange to discard the result. */
3851 o->out = tcg_temp_new_i64();
/* MASK is the set of bits to be operated on from R2.
   Beware of I3/I4 wraparound. */
3863 mask ^= ~0ull >> i4 >> 1;
3865 mask |= ~(~0ull >> i4 >> 1);
3868 /* Rotate the input as necessary. */
3869 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3872 switch (s->fields->op2) {
3873 case 0x55: /* AND */
3874 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3875 tcg_gen_and_i64(o->out, o->out, o->in2);
3878 tcg_gen_andi_i64(o->in2, o->in2, mask);
3879 tcg_gen_or_i64(o->out, o->out, o->in2);
3881 case 0x57: /* XOR */
3882 tcg_gen_andi_i64(o->in2, o->in2, mask);
3883 tcg_gen_xor_i64(o->out, o->out, o->in2);
3890 tcg_gen_andi_i64(cc_dst, o->out, mask);
3891 set_cc_nz_u64(s, cc_dst);
3895 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3897 tcg_gen_bswap16_i64(o->out, o->in2);
3901 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3903 tcg_gen_bswap32_i64(o->out, o->in2);
3907 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3909 tcg_gen_bswap64_i64(o->out, o->in2);
3913 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3915 TCGv_i32 t1 = tcg_temp_new_i32();
3916 TCGv_i32 t2 = tcg_temp_new_i32();
3917 TCGv_i32 to = tcg_temp_new_i32();
3918 tcg_gen_extrl_i64_i32(t1, o->in1);
3919 tcg_gen_extrl_i64_i32(t2, o->in2);
3920 tcg_gen_rotl_i32(to, t1, t2);
3921 tcg_gen_extu_i32_i64(o->out, to);
3922 tcg_temp_free_i32(t1);
3923 tcg_temp_free_i32(t2);
3924 tcg_temp_free_i32(to);
3928 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3930 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3934 #ifndef CONFIG_USER_ONLY
3935 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3937 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3942 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3944 gen_helper_sacf(cpu_env, o->in2);
3945 /* Addressing mode has changed, so end the block. */
3946 return DISAS_PC_STALE;
3950 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3952 int sam = s->insn->data;
3968 /* Bizarre but true, we check the address of the current insn for the
3969 specification exception, not the next to be executed. Thus the PoO
3970 documents that Bad Things Happen two bytes before the end. */
3971 if (s->base.pc_next & ~mask) {
3972 gen_program_exception(s, PGM_SPECIFICATION);
3973 return DISAS_NORETURN;
3977 tsam = tcg_const_i64(sam);
3978 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3979 tcg_temp_free_i64(tsam);
3981 /* Always exit the TB, since we (may have) changed execution mode. */
3982 return DISAS_PC_STALE;
3985 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3987 int r1 = get_field(s->fields, r1);
3988 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3992 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3994 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3998 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
4000 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
4004 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
4006 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
4007 return_low128(o->out2);
4011 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
4013 gen_helper_sqeb(o->out, cpu_env, o->in2);
4017 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
4019 gen_helper_sqdb(o->out, cpu_env, o->in2);
4023 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
4025 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
4026 return_low128(o->out2);
4030 #ifndef CONFIG_USER_ONLY
4031 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
4033 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
4038 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
4040 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4041 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4042 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4044 tcg_temp_free_i32(r1);
4045 tcg_temp_free_i32(r3);
4050 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
4057 disas_jcc(s, &c, get_field(s->fields, m3));
/* We want to store when the condition is fulfilled, so branch
   out when it's not. */
4061 c.cond = tcg_invert_cond(c.cond);
4063 lab = gen_new_label();
4065 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
4067 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
4071 r1 = get_field(s->fields, r1);
4072 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
4073 switch (s->insn->data) {
4075 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
4078 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
4080 case 2: /* STOCFH */
4081 h = tcg_temp_new_i64();
4082 tcg_gen_shri_i64(h, regs[r1], 32);
4083 tcg_gen_qemu_st32(h, a, get_mem_index(s));
4084 tcg_temp_free_i64(h);
4087 g_assert_not_reached();
4089 tcg_temp_free_i64(a);
4095 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
4097 uint64_t sign = 1ull << s->insn->data;
4098 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
4099 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
4100 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4101 /* The arithmetic left shift is curious in that it does not affect
4102 the sign bit. Copy that over from the source unchanged. */
4103 tcg_gen_andi_i64(o->out, o->out, ~sign);
4104 tcg_gen_andi_i64(o->in1, o->in1, sign);
4105 tcg_gen_or_i64(o->out, o->out, o->in1);
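/* Worked example: a 64-bit SLAG of 0x8000000000000001 by 1 yields
   0x8000000000000002; the sign bit is copied over unchanged, and the
   bits shifted out of the magnitude only affect the CC computed above.  */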
4109 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4111 tcg_gen_shl_i64(o->out, o->in1, o->in2);
4115 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4117 tcg_gen_sar_i64(o->out, o->in1, o->in2);
4121 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4123 tcg_gen_shr_i64(o->out, o->in1, o->in2);
4127 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4129 gen_helper_sfpc(cpu_env, o->in2);
4133 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4135 gen_helper_sfas(cpu_env, o->in2);
4139 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4141 /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4142 tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4143 gen_helper_srnm(cpu_env, o->addr1);
4147 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
/* Bits 0-55 are ignored. */
4150 tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4151 gen_helper_srnm(cpu_env, o->addr1);
4155 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4157 TCGv_i64 tmp = tcg_temp_new_i64();
4159 /* Bits other than 61-63 are ignored. */
4160 tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
/* No need to call a helper; we don't implement DFP. */
4163 tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4164 tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4165 tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4167 tcg_temp_free_i64(tmp);
4171 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4173 tcg_gen_extrl_i64_i32(cc_op, o->in1);
4174 tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4177 tcg_gen_shri_i64(o->in1, o->in1, 24);
4178 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4182 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4184 int b1 = get_field(s->fields, b1);
4185 int d1 = get_field(s->fields, d1);
4186 int b2 = get_field(s->fields, b2);
4187 int d2 = get_field(s->fields, d2);
4188 int r3 = get_field(s->fields, r3);
4189 TCGv_i64 tmp = tcg_temp_new_i64();
4191 /* fetch all operands first */
4192 o->in1 = tcg_temp_new_i64();
4193 tcg_gen_addi_i64(o->in1, regs[b1], d1);
4194 o->in2 = tcg_temp_new_i64();
4195 tcg_gen_addi_i64(o->in2, regs[b2], d2);
4196 o->addr1 = get_address(s, 0, r3, 0);
4198 /* load the third operand into r3 before modifying anything */
4199 tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4201 /* subtract CPU timer from first operand and store in GR0 */
4202 gen_helper_stpt(tmp, cpu_env);
4203 tcg_gen_sub_i64(regs[0], o->in1, tmp);
4205 /* store second operand in GR1 */
4206 tcg_gen_mov_i64(regs[1], o->in2);
4208 tcg_temp_free_i64(tmp);
4212 #ifndef CONFIG_USER_ONLY
4213 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4215 tcg_gen_shri_i64(o->in2, o->in2, 4);
4216 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4220 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4222 gen_helper_sske(cpu_env, o->in1, o->in2);
4226 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4228 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4229 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4230 return DISAS_PC_STALE_NOCHAIN;
4233 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4235 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4240 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4242 gen_helper_stck(o->out, cpu_env);
4243 /* ??? We don't implement clock states. */
4244 gen_op_movi_cc(s, 0);
4248 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4250 TCGv_i64 c1 = tcg_temp_new_i64();
4251 TCGv_i64 c2 = tcg_temp_new_i64();
4252 TCGv_i64 todpr = tcg_temp_new_i64();
4253 gen_helper_stck(c1, cpu_env);
/* The 16-bit value is stored in a uint32_t (only the valid bits are set). */
4255 tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4256 /* Shift the 64-bit value into its place as a zero-extended
4257 104-bit value. Note that "bit positions 64-103 are always
4258 non-zero so that they compare differently to STCK"; we set
4259 the least significant bit to 1. */
4260 tcg_gen_shli_i64(c2, c1, 56);
4261 tcg_gen_shri_i64(c1, c1, 8);
4262 tcg_gen_ori_i64(c2, c2, 0x10000);
4263 tcg_gen_or_i64(c2, c2, todpr);
4264 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4265 tcg_gen_addi_i64(o->in2, o->in2, 8);
4266 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4267 tcg_temp_free_i64(c1);
4268 tcg_temp_free_i64(c2);
4269 tcg_temp_free_i64(todpr);
4270 /* ??? We don't implement clock states. */
4271 gen_op_movi_cc(s, 0);
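/* For illustration: the two doublewords stored are (clock >> 8) and
   (clock << 56) | 0x10000 | todpr, so the programmable field occupies
   the low 16 bits and the forced 0x10000 bit is the least significant
   bit of the zero-extended 104-bit clock described above.  */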
4275 #ifndef CONFIG_USER_ONLY
4276 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4278 tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
4279 gen_helper_sck(cc_op, cpu_env, o->in1);
4284 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4286 gen_helper_sckc(cpu_env, o->in2);
4290 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4292 gen_helper_sckpf(cpu_env, regs[0]);
4296 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4298 gen_helper_stckc(o->out, cpu_env);
4302 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4304 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4305 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4306 gen_helper_stctg(cpu_env, r1, o->in2, r3);
4307 tcg_temp_free_i32(r1);
4308 tcg_temp_free_i32(r3);
4312 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4314 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4315 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4316 gen_helper_stctl(cpu_env, r1, o->in2, r3);
4317 tcg_temp_free_i32(r1);
4318 tcg_temp_free_i32(r3);
4322 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4324 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4328 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4330 gen_helper_spt(cpu_env, o->in2);
4334 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4336 gen_helper_stfl(cpu_env);
4340 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4342 gen_helper_stpt(o->out, cpu_env);
4346 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4348 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4353 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4355 gen_helper_spx(cpu_env, o->in2);
4359 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4361 gen_helper_xsch(cpu_env, regs[1]);
4366 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4368 gen_helper_csch(cpu_env, regs[1]);
4373 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4375 gen_helper_hsch(cpu_env, regs[1]);
4380 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4382 gen_helper_msch(cpu_env, regs[1], o->in2);
4387 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4389 gen_helper_rchp(cpu_env, regs[1]);
4394 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4396 gen_helper_rsch(cpu_env, regs[1]);
4401 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4403 gen_helper_sal(cpu_env, regs[1]);
4407 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4409 gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4413 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4415 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4416 gen_op_movi_cc(s, 3);
4420 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4422 /* The instruction is suppressed if not provided. */
4426 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4428 gen_helper_ssch(cpu_env, regs[1], o->in2);
4433 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4435 gen_helper_stsch(cpu_env, regs[1], o->in2);
4440 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4442 gen_helper_stcrw(cpu_env, o->in2);
4447 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4449 gen_helper_tpi(cc_op, cpu_env, o->addr1);
4454 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4456 gen_helper_tsch(cpu_env, regs[1], o->in2);
4461 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4463 gen_helper_chsc(cpu_env, o->in2);
4468 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4470 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4471 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4475 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4477 uint64_t i2 = get_field(s->fields, i2);
/* It is important to do what the instruction name says: STORE THEN.
   If we let the output hook perform the store and we then fault and
   restart, we'll have the wrong SYSTEM MASK in place. */
4483 t = tcg_temp_new_i64();
4484 tcg_gen_shri_i64(t, psw_mask, 56);
4485 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4486 tcg_temp_free_i64(t);
4488 if (s->fields->op == 0xac) {
4489 tcg_gen_andi_i64(psw_mask, psw_mask,
4490 (i2 << 56) | 0x00ffffffffffffffull);
4492 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4495 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4496 return DISAS_PC_STALE_NOCHAIN;
4499 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4501 gen_helper_stura(cpu_env, o->in2, o->in1);
4505 static DisasJumpType op_sturg(DisasContext *s, DisasOps *o)
4507 gen_helper_sturg(cpu_env, o->in2, o->in1);
4512 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4514 gen_helper_stfle(cc_op, cpu_env, o->in2);
4519 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4521 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4525 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4527 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4531 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4533 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4537 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4539 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4543 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4545 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4546 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4547 gen_helper_stam(cpu_env, r1, o->in2, r3);
4548 tcg_temp_free_i32(r1);
4549 tcg_temp_free_i32(r3);
4553 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4555 int m3 = get_field(s->fields, m3);
4556 int pos, base = s->insn->data;
4557 TCGv_i64 tmp = tcg_temp_new_i64();
4559 pos = base + ctz32(m3) * 8;
4562 /* Effectively a 32-bit store. */
4563 tcg_gen_shri_i64(tmp, o->in1, pos);
4564 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4570 /* Effectively a 16-bit store. */
4571 tcg_gen_shri_i64(tmp, o->in1, pos);
4572 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4579 /* Effectively an 8-bit store. */
4580 tcg_gen_shri_i64(tmp, o->in1, pos);
4581 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4585 /* This is going to be a sequence of shifts and stores. */
4586 pos = base + 32 - 8;
4589 tcg_gen_shri_i64(tmp, o->in1, pos);
4590 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4591 tcg_gen_addi_i64(o->in2, o->in2, 1);
4593 m3 = (m3 << 1) & 0xf;
4598 tcg_temp_free_i64(tmp);
4602 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4604 int r1 = get_field(s->fields, r1);
4605 int r3 = get_field(s->fields, r3);
4606 int size = s->insn->data;
4607 TCGv_i64 tsize = tcg_const_i64(size);
4611 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4613 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4618 tcg_gen_add_i64(o->in2, o->in2, tsize);
4622 tcg_temp_free_i64(tsize);
4626 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4628 int r1 = get_field(s->fields, r1);
4629 int r3 = get_field(s->fields, r3);
4630 TCGv_i64 t = tcg_temp_new_i64();
4631 TCGv_i64 t4 = tcg_const_i64(4);
4632 TCGv_i64 t32 = tcg_const_i64(32);
4635 tcg_gen_shl_i64(t, regs[r1], t32);
4636 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4640 tcg_gen_add_i64(o->in2, o->in2, t4);
4644 tcg_temp_free_i64(t);
4645 tcg_temp_free_i64(t4);
4646 tcg_temp_free_i64(t32);
4650 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4652 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4653 gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4654 } else if (HAVE_ATOMIC128) {
4655 gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4657 gen_helper_exit_atomic(cpu_env);
4658 return DISAS_NORETURN;
4663 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4665 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4666 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4668 gen_helper_srst(cpu_env, r1, r2);
4670 tcg_temp_free_i32(r1);
4671 tcg_temp_free_i32(r2);
4676 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4678 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4679 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4681 gen_helper_srstu(cpu_env, r1, r2);
4683 tcg_temp_free_i32(r1);
4684 tcg_temp_free_i32(r2);
4689 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4691 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4695 static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
4700 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4702 /* The !borrow flag is the msb of CC. Since we want the inverse of
4703 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4704 disas_jcc(s, &cmp, 8 | 4);
4705 borrow = tcg_temp_new_i64();
4707 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4709 TCGv_i32 t = tcg_temp_new_i32();
4710 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4711 tcg_gen_extu_i32_i64(borrow, t);
4712 tcg_temp_free_i32(t);
4716 tcg_gen_sub_i64(o->out, o->out, borrow);
4717 tcg_temp_free_i64(borrow);
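/* For illustration: if the previous subtraction left CC = 1 (borrow,
   nonzero result), the setcond above produces borrow = 1 and one extra
   unit is subtracted; CC = 2 or 3 produces borrow = 0.  */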
4721 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4728 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4729 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4730 tcg_temp_free_i32(t);
4732 t = tcg_const_i32(s->ilen);
4733 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4734 tcg_temp_free_i32(t);
4736 gen_exception(EXCP_SVC);
4737 return DISAS_NORETURN;
4740 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4744 cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4745 cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4746 gen_op_movi_cc(s, cc);
4750 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4752 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4757 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4759 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4764 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4766 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4771 #ifndef CONFIG_USER_ONLY
4773 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4775 gen_helper_testblock(cc_op, cpu_env, o->in2);
4780 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4782 gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4789 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4791 TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4792 gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4793 tcg_temp_free_i32(l1);
4798 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4800 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4801 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4802 tcg_temp_free_i32(l);
4807 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4809 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4810 return_low128(o->out2);
4815 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4817 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4818 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4819 tcg_temp_free_i32(l);
4824 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4826 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4827 gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4828 tcg_temp_free_i32(l);
4833 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4835 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4836 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4837 TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4838 TCGv_i32 tst = tcg_temp_new_i32();
4839 int m3 = get_field(s->fields, m3);
4841 if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4845 tcg_gen_movi_i32(tst, -1);
4847 tcg_gen_extrl_i64_i32(tst, regs[0]);
4848 if (s->insn->opc & 3) {
4849 tcg_gen_ext8u_i32(tst, tst);
4851 tcg_gen_ext16u_i32(tst, tst);
4854 gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4856 tcg_temp_free_i32(r1);
4857 tcg_temp_free_i32(r2);
4858 tcg_temp_free_i32(sizes);
4859 tcg_temp_free_i32(tst);
4864 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4866 TCGv_i32 t1 = tcg_const_i32(0xff);
4867 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4868 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4869 tcg_temp_free_i32(t1);
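/* For illustration: TEST AND SET atomically replaces the byte with
   0xff and sets CC to the byte's former leftmost bit, which the
   extract above reads from bit 7 of the old value.  */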
4874 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4876 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4877 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4878 tcg_temp_free_i32(l);
4882 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4884 int l1 = get_field(s->fields, l1) + 1;
4887 /* The length must not exceed 32 bytes. */
4889 gen_program_exception(s, PGM_SPECIFICATION);
4890 return DISAS_NORETURN;
4892 l = tcg_const_i32(l1);
4893 gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4894 tcg_temp_free_i32(l);
4899 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4901 int l1 = get_field(s->fields, l1) + 1;
/* The length must be even and must not exceed 64 bytes. */
4905 if ((l1 & 1) || (l1 > 64)) {
4906 gen_program_exception(s, PGM_SPECIFICATION);
4907 return DISAS_NORETURN;
4909 l = tcg_const_i32(l1);
4910 gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4911 tcg_temp_free_i32(l);
4917 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4919 int d1 = get_field(s->fields, d1);
4920 int d2 = get_field(s->fields, d2);
4921 int b1 = get_field(s->fields, b1);
4922 int b2 = get_field(s->fields, b2);
4923 int l = get_field(s->fields, l1);
4926 o->addr1 = get_address(s, 0, b1, d1);
4928 /* If the addresses are identical, this is a store/memset of zero. */
4929 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4930 o->in2 = tcg_const_i64(0);
4934 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4937 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4941 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4944 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4948 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4951 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4955 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4957 gen_op_movi_cc(s, 0);
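/* For illustration: "xc 0(8,%r1),0(%r1)" zeroes eight bytes; with
   l = 7 the unrolled path above emits a single 8-byte store of zero
   and sets CC = 0.  */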
4961 /* But in general we'll defer to a helper. */
4962 o->in2 = get_address(s, 0, b2, d2);
4963 t32 = tcg_const_i32(l);
4964 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4965 tcg_temp_free_i32(t32);
4970 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4972 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4976 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4978 int shift = s->insn->data & 0xff;
4979 int size = s->insn->data >> 8;
4980 uint64_t mask = ((1ull << size) - 1) << shift;
4983 tcg_gen_shli_i64(o->in2, o->in2, shift);
4984 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4986 /* Produce the CC from only the bits manipulated. */
4987 tcg_gen_andi_i64(cc_dst, o->out, mask);
4988 set_cc_nz_u64(s, cc_dst);
4992 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4994 o->in1 = tcg_temp_new_i64();
4996 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4997 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4999 /* Perform the atomic operation in memory. */
5000 tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
/* Recompute the operation even in the atomic case; the result is needed for setting CC. */
5005 tcg_gen_xor_i64(o->out, o->in1, o->in2);
5007 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5008 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
5013 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
5015 o->out = tcg_const_i64(0);
5019 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
5021 o->out = tcg_const_i64(0);
5027 #ifndef CONFIG_USER_ONLY
5028 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
5030 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5032 gen_helper_clp(cpu_env, r2);
5033 tcg_temp_free_i32(r2);
5038 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
5040 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5041 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5043 gen_helper_pcilg(cpu_env, r1, r2);
5044 tcg_temp_free_i32(r1);
5045 tcg_temp_free_i32(r2);
5050 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
5052 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5053 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5055 gen_helper_pcistg(cpu_env, r1, r2);
5056 tcg_temp_free_i32(r1);
5057 tcg_temp_free_i32(r2);
5062 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
5064 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5065 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5067 gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
5068 tcg_temp_free_i32(ar);
5069 tcg_temp_free_i32(r1);
5074 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
5076 gen_helper_sic(cpu_env, o->in1, o->in2);
5080 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
5082 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5083 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5085 gen_helper_rpcit(cpu_env, r1, r2);
5086 tcg_temp_free_i32(r1);
5087 tcg_temp_free_i32(r2);
5092 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
5094 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5095 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
5096 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5098 gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
5099 tcg_temp_free_i32(ar);
5100 tcg_temp_free_i32(r1);
5101 tcg_temp_free_i32(r3);
5106 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5108 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5109 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5111 gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
5112 tcg_temp_free_i32(ar);
5113 tcg_temp_free_i32(r1);
5119 #include "translate_vx.inc.c"
5121 /* ====================================================================== */
5122 /* The "Cc OUTput" generators. Given the generated output (and in some cases
5123 the original inputs), update the various cc data structures in order to
5124 be able to compute the new condition code. */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}

/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */
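
/*
 * Illustrative note (added commentary): a typical table entry wires an
 * instruction up as prep -> op -> wout.  With prep_new, the operation
 * writes into a fresh temporary that a wout helper then copies back:
 *
 *     prep_new(s, f, &o);            // o.out = tcg_temp_new_i64()
 *     ret = insn->help_op(s, &o);    // compute into o.out
 *     wout_r1(s, f, &o);             // store_reg(r1, o.out)
 *
 * whereas prep_r1 installs the TCG global regs[r1] directly as the
 * destination, so no separate copy-out is required.
 */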

static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = load_freg(get_field(f, r1));
    o->out2 = load_freg(get_field(f, r1) + 2);
}
#define SPEC_prep_x1 SPEC_r1_f128

/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */
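
/*
 * Illustrative note (added commentary): the partial-register writers
 * use a deposit so the untouched bits of the GPR survive.  E.g. for
 * wout_r1_8 with regs[r1] = 0x1122334455667788 and o->out = 0xff:
 *
 *     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
 *     // regs[r1] == 0x11223344556677ff afterwards
 */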

static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0

/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */

static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg(get_field(f, r1));
}
#define SPEC_in1_f1 0

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg(get_field(f, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg(get_field(f, r3));
}
#define SPEC_in1_f3 0

static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0
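
/*
 * Illustrative note (added commentary): get_address() forms the usual
 * s390x effective address base + index + displacement, where register
 * number 0 contributes the value 0 rather than r0's contents.  E.g.
 * for in1_la2 with x2 = 7, b2 = 5, d2 = 0x123:
 *
 *     o->addr1 = get_address(s, 7, 5, 0x123);    // r7 + r5 + 0x123
 */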

static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0

/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */

static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg(get_field(f, r2));
}
#define SPEC_in2_f2 0

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg(get_field(f, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0
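
/*
 * Illustrative note (added commentary): relative-immediate operands
 * count halfwords, so in2_ri2 scales i2 by 2 from the current PC.
 * E.g. with pc_next = 0x1000 and i2 = -4:
 *
 *     o->in2 = tcg_const_i64(0x1000 + (int64_t)-4 * 2);    // 0xff8
 */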

static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif

/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */
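
/*
 * Illustrative note (added commentary): each insn-data.def line is
 * expanded three times via E.  A hypothetical 32-bit add entry along
 * the lines of
 *
 *     C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
 *
 * would yield "insn_AR," for the enum, a DisasInsn initializer
 * referencing in1_r1/in2_r2/prep_new/wout_r1_32/op_add/cout_adds32,
 * and a "case 0x1a00:" arm inside lookup_opc() below.
 */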

#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

/* Give smaller names to the various facilities.  */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V           S390_FEAT_VECTOR /* vector facility */
#define FAC_VE          S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C

/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
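
/*
 * Illustrative note (added commentary): with the insn left-aligned,
 * a 4-bit field starting at big-bit-endian position 8 (e.g. r1 of an
 * RR-format insn) is extracted as
 *
 *     r = (insn << 8) >> (64 - 4);
 *
 * i.e. shift the field's first bit up to bit 63, then shift back down
 * so that only f->size bits remain.
 */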

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 20:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;
    o->c[f->indexC] = r;
}

/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn. */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
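    /*
     * Illustrative note (added commentary): for the RRE insn AGR
     * (opcode 0xb908), the left-aligned insn is 0xb908xxxx00000000, so
     * op = 0xb9 and op2 = (insn << 8) >> 56 = 0x08; lookup_opc(0xb908)
     * then finds the corresponding table entry.
     */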
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}

static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}

static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */
    return !(reg & 0x2);
}
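
/*
 * Illustrative note (added commentary): the valid 128-bit FP register
 * pairs begin at 0, 1, 4, 5, 8, 9, 12 and 13 -- exactly the register
 * numbers with bit 1 clear, hence the single-bit test above:
 *
 *     is_fp_pair(4);    // true:  (4 & 0x2) == 0
 *     is_fp_pair(2);    // false: (2 & 0x2) != 0
 */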

static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasFields f;
    DisasOps o = {};

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return DISAS_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            return DISAS_NORETURN;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(&f, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(&f, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(&f, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                return DISAS_NORETURN;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                return DISAS_NORETURN;
            }
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(&f, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(&f, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(&f, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(&f, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(&f, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &f, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers.  */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}

static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
    dc->do_debug = dc->base.singlestep_enabled;
}

static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}

static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = DISAS_PC_STALE;
    dc->do_debug = true;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size does the right thing.  */
    dc->base.pc_next += 2;
    return true;
}

static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        uint64_t page_start;

        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(dc) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately log_target_disas can't use host memory.  */
        qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
    } else {
        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
    }
}

static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .breakpoint_check   = s390x_tr_breakpoint_check,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
}

void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];

    env->psw.addr = data[0];
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {