/*
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    struct TranslationBlock *tb;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t ex_value;
    uint64_t pc, next_pc;
    uint32_t ilen;
    enum cc_op cc_op;
    bool singlestep_enabled;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
/* is_jmp field values */
#define DISAS_EXCP DISAS_TARGET_0

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
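/*
 * A note on the helper below: branch-and-link style insns (see op_bas
 * further down) save "link information" derived from the next sequential
 * address.  In 64-bit mode that is the address itself; in 31-bit mode
 * the addressing-mode bit is set as well, so e.g. a pc of 0x1004 yields
 * 0x80001004.  In 24-bit mode the address is used unchanged.
 */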
static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
{
    if (!(s->tb->flags & FLAG_MASK_64)) {
        if (s->tb->flags & FLAG_MASK_32) {
            return pc | 0x80000000;
        }
    }
    return pc;
}
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->pc);

    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->pc);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->pc);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->pc);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
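/*
 * Pick the softmmu MMU index for the current translation: real-mode
 * accesses when DAT is off, otherwise the index matching the PSW's
 * address-space control (primary, secondary, or home).
 */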
static int get_mem_index(DisasContext *s)
{
    if (!(s->tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
}
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_trap(DisasContext *s)
{
    TCGv_i32 t;

    /* Set DXC to 0xff.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    gen_program_exception(s, PGM_DATA);
}

#ifndef CONFIG_USER_ONLY
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & FLAG_MASK_PSTATE) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif
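/*
 * Form an effective address from the classic base + index + displacement
 * triple.  For example, 8(%r2,%r3) has d2 = 8, x2 = 2, b2 = 3 and yields
 * regs[3] + regs[2] + 8; register 0 never participates as base or index,
 * hence the zero tests on b2 and x2 below.  In 24/31-bit mode the result
 * is truncated to 31 bits.
 */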
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
static bool use_exit_tb(DisasContext *s)
{
    return (s->singlestep_enabled ||
            (tb_cflags(s->tb) & CF_LAST_IO) ||
            (s->tb->flags & FLAG_MASK_PER));
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
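/*
 * In both tables above the index is the 4-bit instruction mask, where
 * bit 8 selects CC=0, bit 4 CC=1, bit 2 CC=2 and bit 1 CC=3.  E.g. for a
 * comparison, mask 0x8 (branch on CC=0) maps to TCG_COND_EQ, and mask
 * 0x6 (CC=1 or CC=2, i.e. LT or GT) maps to TCG_COND_NE.
 */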
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as don't care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
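/*
 * For the generic case above, the CC is tested as (8 >> cc) & mask.
 * Worked example: mask 0x9 (CC=0 or CC=3) with cc == 3 gives
 * (8 >> 3) & 0x9 = 1, which is nonzero, so the TCG_COND_NE comparison
 * against zero fires; with cc == 1 it gives (8 >> 1) & 0x9 = 0.
 */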
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}
/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
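/*
 * Typical usage, via the macros above:
 *     int r1 = get_field(s->fields, r1);
 *     if (have_field(s->fields, i2)) { ... }
 * The first expands to get_field1(s->fields, FLD_O_r1, FLD_C_r1): the "O"
 * index checks availability, the "C" index selects the compact slot.
 */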
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
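/*
 * Each initializer above is { beg, size, type, indexC, indexO }: the bit
 * offset of the field within the insn, its width in bits, an extraction
 * type (as used here: 0 for plain unsigned fields, 1 for signed
 * immediates, 2 for 20-bit long displacements), and the two field
 * indices.  E.g. BXD describes a base nibble at bit 16, an index nibble
 * at bit 12, and a 12-bit displacement at bit 20.
 */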
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */

typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
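/*
 * Example: an insn whose r1 names a 64+64-bit register pair would carry
 * SPEC_r1_even, so an odd r1 raises the specification exception once in
 * the common decode path instead of inside each operation helper (see
 * the explicit pair checks in op_clcl and op_cuXX below for insns whose
 * constraints cannot be expressed this way).
 */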
/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We have updated the PC and CC values.  */
    EXIT_PC_CC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are exiting the TB to the main loop.  */
    EXIT_PC_STALE_NOCHAIN,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
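/*
 * The decode loop (translate_one, per the comment above) drives these
 * hooks in roughly this order: help_in1 and help_in2 load the operands,
 * help_prep allocates or aliases the outputs, help_op performs the
 * operation, help_wout writes results back, and help_cout computes the
 * condition code.  The per-insn "data" word parameterizes shared helpers,
 * e.g. the shift/size packing used by op_andi below.
 */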
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}
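/*
 * Used for shift-style insns: the effective shift count is d2 plus the
 * contents of register b2 (when b2 is nonzero), truncated by MASK (e.g.
 * 63 for a 64-bit shift).  So a shift operand of 1(%r2) shifts by
 * (regs[2] + 1) & mask.
 */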
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        per_branch(s, true);
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    }
}
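/*
 * help_branch below distinguishes three code paths once the trivial
 * cases are out of the way: both edges can use goto_tb (chain both
 * exits), only the fallthru can (store the taken target first, then
 * chain the fallthru), or neither can (a movcond selects the next PC
 * and the TB exits to the main loop).
 */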
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}

static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
static ExitStatus op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory.  */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return NO_EXIT;
}

static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
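/*
 * In op_andi, insn->data packs the field position: the low byte is the
 * shift, the next byte the width.  E.g. an AND immediate acting on the
 * high-high halfword would use shift = 48 and size = 16, giving mask
 * 0xffff000000000000; all bits outside the mask pass through from in1
 * unchanged because of the ORI with ~mask.
 */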
static ExitStatus op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory.  */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return NO_EXIT;
}

static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return NO_EXIT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);
    return NO_EXIT;
}
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
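/*
 * The L1 field encodes the operand length minus one, hence the switch on
 * l + 1 above: e.g. CLC with L1 = 3 compares 4 bytes and is inlined as a
 * pair of 32-bit loads, while lengths other than 1/2/4/8 bytes go
 * through the out-of-line helper.
 */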
static ExitStatus op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}

static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return NO_EXIT;
}
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->in1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGMemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    check_privileged(s);
    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);

    gen_set_label(lab);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}

static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}
static ExitStatus op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif

static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}

static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}

static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return EXIT_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return EXIT_PC_CC_UPDATED;
}
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
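/*
 * Worked example: for IN = 0x0000400000000000, CLZ gives R1 = 17 and the
 * found bit is 0x8000000000000000 >> 17 = 0x0000400000000000, so R1+1 is
 * IN with that bit cleared, i.e. 0.  For IN = 0 the shift count is 64,
 * but the ANDC against cc_dst (also 0) still yields the required 0.
 */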
static ExitStatus op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
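/*
 * Example: ICM with mask 0xc takes the 16-bit path above and deposits
 * the two loaded bytes at bit position base + ctz32(0xc) * 8 = base + 16,
 * while a sparse mask such as 0xa takes the default path, performing one
 * byte load and deposit per set mask bit.  In both cases ccm accumulates
 * exactly the inserted bits for the CC_OP_ICM computation.
 */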
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}

static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
static ExitStatus op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}

static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}

static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
static ExitStatus op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
    int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
    int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMCTR:
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return EXIT_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return EXIT_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return EXIT_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    }

    t_r1 = tcg_const_i32(r1);
    t_r2 = tcg_const_i32(r2);
    t_r3 = tcg_const_i32(r3);
    type = tcg_const_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r2);
    tcg_temp_free_i32(t_r3);
    tcg_temp_free_i32(type);
    return NO_EXIT;
}
static ExitStatus op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2665 static ExitStatus op_lat(DisasContext *s, DisasOps *o)
2667 TCGLabel *lab = gen_new_label();
2668 store_reg32_i64(get_field(s->fields, r1), o->in2);
2669 /* The value is stored even in case of trap. */
2670 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2676 static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
2678 TCGLabel *lab = gen_new_label();
2679 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2680 /* The value is stored even in case of trap. */
2681 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2687 static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
2689 TCGLabel *lab = gen_new_label();
2690 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2691 /* The value is stored even in case of trap. */
2692 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2698 static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
2700 TCGLabel *lab = gen_new_label();
2701 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2702 /* The value is stored even in case of trap. */
2703 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2704 gen_trap(s);
2705 gen_set_label(lab);
2706 return NO_EXIT;
2709 static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
2711 TCGLabel *lab = gen_new_label();
2712 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2713 /* The value is stored even in case of trap. */
2714 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2715 gen_trap(s);
2716 gen_set_label(lab);
2717 return NO_EXIT;
2720 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2722 DisasCompare c;
2724 disas_jcc(s, &c, get_field(s->fields, m3));
2726 if (c.is_64) {
2727 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2728 o->in2, o->in1);
2729 free_compare(&c);
2730 } else {
2731 TCGv_i32 t32 = tcg_temp_new_i32();
2732 TCGv_i64 t, z;
2734 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2735 free_compare(&c);
2737 t = tcg_temp_new_i64();
2738 tcg_gen_extu_i32_i64(t, t32);
2739 tcg_temp_free_i32(t32);
2741 z = tcg_const_i64(0);
2742 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2743 tcg_temp_free_i64(t);
2744 tcg_temp_free_i64(z);
2750 #ifndef CONFIG_USER_ONLY
2751 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2753 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2754 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2755 check_privileged(s);
2756 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2757 tcg_temp_free_i32(r1);
2758 tcg_temp_free_i32(r3);
2759 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2760 return EXIT_PC_STALE_NOCHAIN;
2763 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2765 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2766 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2767 check_privileged(s);
2768 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2769 tcg_temp_free_i32(r1);
2770 tcg_temp_free_i32(r3);
2771 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2772 return EXIT_PC_STALE_NOCHAIN;
2775 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2777 check_privileged(s);
2778 gen_helper_lra(o->out, cpu_env, o->in2);
2783 static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
2785 check_privileged(s);
2787 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2791 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2795 check_privileged(s);
2796 per_breaking_event(s);
2798 t1 = tcg_temp_new_i64();
2799 t2 = tcg_temp_new_i64();
2800 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2801 tcg_gen_addi_i64(o->in2, o->in2, 4);
2802 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2803 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2804 tcg_gen_shli_i64(t1, t1, 32);
2805 gen_helper_load_psw(cpu_env, t1, t2);
2806 tcg_temp_free_i64(t1);
2807 tcg_temp_free_i64(t2);
2808 return EXIT_NORETURN;
2811 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2815 check_privileged(s);
2816 per_breaking_event(s);
2818 t1 = tcg_temp_new_i64();
2819 t2 = tcg_temp_new_i64();
2820 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2821 tcg_gen_addi_i64(o->in2, o->in2, 8);
2822 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2823 gen_helper_load_psw(cpu_env, t1, t2);
2824 tcg_temp_free_i64(t1);
2825 tcg_temp_free_i64(t2);
2826 return EXIT_NORETURN;
2830 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2832 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2833 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2834 gen_helper_lam(cpu_env, r1, o->in2, r3);
2835 tcg_temp_free_i32(r1);
2836 tcg_temp_free_i32(r3);
2840 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2842 int r1 = get_field(s->fields, r1);
2843 int r3 = get_field(s->fields, r3);
2844 TCGv_i64 t1, t2;
2846 /* Only one register to read. */
2847 t1 = tcg_temp_new_i64();
2848 if (unlikely(r1 == r3)) {
2849 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2850 store_reg32_i64(r1, t1);
2851 tcg_temp_free(t1);
2852 return NO_EXIT;
2853 }
2855 /* First load the values of the first and last registers to trigger
2856 possible page faults. */
2857 t2 = tcg_temp_new_i64();
2858 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2859 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2860 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2861 store_reg32_i64(r1, t1);
2862 store_reg32_i64(r3, t2);
2864 /* Only two registers to read. */
2865 if (((r1 + 1) & 15) == r3) {
2866 tcg_temp_free(t2);
2867 tcg_temp_free(t1);
2868 return NO_EXIT;
2869 }
2871 /* Then load the remaining registers. Page fault can't occur. */
2872 r3 = (r3 - 1) & 15;
2873 tcg_gen_movi_i64(t2, 4);
2874 while (r1 != r3) {
2875 r1 = (r1 + 1) & 15;
2876 tcg_gen_add_i64(o->in2, o->in2, t2);
2877 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2878 store_reg32_i64(r1, t1);
2879 }
2886 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2888 int r1 = get_field(s->fields, r1);
2889 int r3 = get_field(s->fields, r3);
2890 TCGv_i64 t1, t2;
2892 /* Only one register to read. */
2893 t1 = tcg_temp_new_i64();
2894 if (unlikely(r1 == r3)) {
2895 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2896 store_reg32h_i64(r1, t1);
2897 tcg_temp_free(t1);
2898 return NO_EXIT;
2899 }
2901 /* First load the values of the first and last registers to trigger
2902 possible page faults. */
2903 t2 = tcg_temp_new_i64();
2904 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2905 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2906 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2907 store_reg32h_i64(r1, t1);
2908 store_reg32h_i64(r3, t2);
2910 /* Only two registers to read. */
2911 if (((r1 + 1) & 15) == r3) {
2912 tcg_temp_free(t2);
2913 tcg_temp_free(t1);
2914 return NO_EXIT;
2915 }
2917 /* Then load the remaining registers. Page fault can't occur. */
2918 r3 = (r3 - 1) & 15;
2919 tcg_gen_movi_i64(t2, 4);
2920 while (r1 != r3) {
2921 r1 = (r1 + 1) & 15;
2922 tcg_gen_add_i64(o->in2, o->in2, t2);
2923 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2924 store_reg32h_i64(r1, t1);
2925 }
2932 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2934 int r1 = get_field(s->fields, r1);
2935 int r3 = get_field(s->fields, r3);
2936 TCGv_i64 t1, t2;
2938 /* Only one register to read. */
2939 if (unlikely(r1 == r3)) {
2940 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2941 return NO_EXIT;
2942 }
2944 /* First load the values of the first and last registers to trigger
2945 possible page faults. */
2946 t1 = tcg_temp_new_i64();
2947 t2 = tcg_temp_new_i64();
2948 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2949 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2950 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2951 tcg_gen_mov_i64(regs[r1], t1);
2954 /* Only two registers to read. */
2955 if (((r1 + 1) & 15) == r3) {
2956 tcg_temp_free(t1);
2957 return NO_EXIT;
2958 }
2960 /* Then load the remaining registers. Page fault can't occur. */
2961 r3 = (r3 - 1) & 15;
2962 tcg_gen_movi_i64(t1, 8);
2963 while (r1 != r3) {
2964 r1 = (r1 + 1) & 15;
2965 tcg_gen_add_i64(o->in2, o->in2, t1);
2966 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2967 }
2973 static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
2976 TCGMemOp mop = s->insn->data;
2978 /* In a parallel context, stop the world and single step. */
2979 if (tb_cflags(s->tb) & CF_PARALLEL) {
2982 gen_exception(EXCP_ATOMIC);
2983 return EXIT_NORETURN;
2986 /* In a serial context, perform the two loads ... */
2987 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
2988 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
2989 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
2990 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
2991 tcg_temp_free_i64(a1);
2992 tcg_temp_free_i64(a2);
2994 /* ... and indicate that we performed them while interlocked. */
2995 gen_op_movi_cc(s, 0);
2999 static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
3001 if (tb_cflags(s->tb) & CF_PARALLEL) {
3002 gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3004 gen_helper_lpq(o->out, cpu_env, o->in2);
3006 return_low128(o->out2);
3010 #ifndef CONFIG_USER_ONLY
3011 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
3013 check_privileged(s);
3014 gen_helper_lura(o->out, cpu_env, o->in2);
3018 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
3020 check_privileged(s);
3021 gen_helper_lurag(o->out, cpu_env, o->in2);
3026 static ExitStatus op_lzrb(DisasContext *s, DisasOps *o)
3028 tcg_gen_andi_i64(o->out, o->in2, -256);
3032 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
3034 o->out = o->in2;
3035 o->g_out = o->g_in2;
3041 static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
3043 int b2 = get_field(s->fields, b2);
3044 TCGv ar1 = tcg_temp_new_i64();
3046 o->out = o->in2;
3047 o->g_out = o->g_in2;
3051 switch (s->tb->flags & FLAG_MASK_ASC) {
3052 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3053 tcg_gen_movi_i64(ar1, 0);
3054 break;
3055 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3056 tcg_gen_movi_i64(ar1, 1);
3057 break;
3058 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3059 if (b2) {
3060 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3061 } else {
3062 tcg_gen_movi_i64(ar1, 0);
3063 }
3064 break;
3065 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3066 tcg_gen_movi_i64(ar1, 2);
3067 break;
3068 }
3070 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3071 tcg_temp_free_i64(ar1);
3076 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
3078 o->out = o->in1;
3079 o->out2 = o->in2;
3080 o->g_out = o->g_in1;
3081 o->g_out2 = o->g_in2;
3084 o->g_in1 = o->g_in2 = false;
3088 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
3090 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3091 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3092 tcg_temp_free_i32(l);
3096 static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
3098 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3099 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3100 tcg_temp_free_i32(l);
3104 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
3106 int r1 = get_field(s->fields, r1);
3107 int r2 = get_field(s->fields, r2);
3110 /* r1 and r2 must be even. */
3111 if (r1 & 1 || r2 & 1) {
3112 gen_program_exception(s, PGM_SPECIFICATION);
3113 return EXIT_NORETURN;
3116 t1 = tcg_const_i32(r1);
3117 t2 = tcg_const_i32(r2);
3118 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3119 tcg_temp_free_i32(t1);
3120 tcg_temp_free_i32(t2);
3125 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
3127 int r1 = get_field(s->fields, r1);
3128 int r3 = get_field(s->fields, r3);
3131 /* r1 and r3 must be even. */
3132 if (r1 & 1 || r3 & 1) {
3133 gen_program_exception(s, PGM_SPECIFICATION);
3134 return EXIT_NORETURN;
3137 t1 = tcg_const_i32(r1);
3138 t3 = tcg_const_i32(r3);
3139 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3140 tcg_temp_free_i32(t1);
3141 tcg_temp_free_i32(t3);
3146 static ExitStatus op_mvclu(DisasContext *s, DisasOps *o)
3148 int r1 = get_field(s->fields, r1);
3149 int r3 = get_field(s->fields, r3);
3152 /* r1 and r3 must be even. */
3153 if (r1 & 1 || r3 & 1) {
3154 gen_program_exception(s, PGM_SPECIFICATION);
3155 return EXIT_NORETURN;
3158 t1 = tcg_const_i32(r1);
3159 t3 = tcg_const_i32(r3);
3160 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3161 tcg_temp_free_i32(t1);
3162 tcg_temp_free_i32(t3);
3167 static ExitStatus op_mvcos(DisasContext *s, DisasOps *o)
3169 int r3 = get_field(s->fields, r3);
3170 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3175 #ifndef CONFIG_USER_ONLY
3176 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
3178 int r1 = get_field(s->fields, l1);
3179 check_privileged(s);
3180 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3185 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
3187 int r1 = get_field(s->fields, l1);
3188 check_privileged(s);
3189 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3195 static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
3197 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3198 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3199 tcg_temp_free_i32(l);
3203 static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
3205 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3206 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3207 tcg_temp_free_i32(l);
3211 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
3213 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3218 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
3220 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3222 return_low128(o->in2);
3226 static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
3228 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3229 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3230 tcg_temp_free_i32(l);
3234 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
3236 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3240 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
3242 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3246 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
3248 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3252 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
3254 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3258 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
3260 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3264 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
3266 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3267 return_low128(o->out2);
3271 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
3273 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3274 return_low128(o->out2);
3278 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
3280 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3281 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3282 tcg_temp_free_i64(r3);
3286 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
3288 int r3 = get_field(s->fields, r3);
3289 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3293 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
3295 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3296 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3297 tcg_temp_free_i64(r3);
3301 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
3303 int r3 = get_field(s->fields, r3);
3304 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3308 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3311 z = tcg_const_i64(0);
3312 n = tcg_temp_new_i64();
3313 tcg_gen_neg_i64(n, o->in2);
3314 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3315 tcg_temp_free_i64(n);
3316 tcg_temp_free_i64(z);
3320 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
3322 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3326 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
3328 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3332 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
3334 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3335 tcg_gen_mov_i64(o->out2, o->in2);
3339 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
3341 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3342 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3343 tcg_temp_free_i32(l);
3348 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3350 tcg_gen_neg_i64(o->out, o->in2);
3354 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3356 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3360 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3362 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3366 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3368 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3369 tcg_gen_mov_i64(o->out2, o->in2);
3373 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3375 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3376 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3377 tcg_temp_free_i32(l);
3382 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3384 tcg_gen_or_i64(o->out, o->in1, o->in2);
3388 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3390 int shift = s->insn->data & 0xff;
3391 int size = s->insn->data >> 8;
3392 uint64_t mask = ((1ull << size) - 1) << shift;
3395 tcg_gen_shli_i64(o->in2, o->in2, shift);
3396 tcg_gen_or_i64(o->out, o->in1, o->in2);
3398 /* Produce the CC from only the bits manipulated. */
3399 tcg_gen_andi_i64(cc_dst, o->out, mask);
3400 set_cc_nz_u64(s, cc_dst);
3404 static ExitStatus op_oi(DisasContext *s, DisasOps *o)
3406 o->in1 = tcg_temp_new_i64();
3408 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3409 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3410 } else {
3411 /* Perform the atomic operation in memory. */
3412 tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3413 s->insn->data);
3414 }
3416 /* Recompute also for atomic case: needed for setting CC. */
3417 tcg_gen_or_i64(o->out, o->in1, o->in2);
3419 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3420 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3425 static ExitStatus op_pack(DisasContext *s, DisasOps *o)
3427 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3428 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3429 tcg_temp_free_i32(l);
3433 static ExitStatus op_pka(DisasContext *s, DisasOps *o)
3435 int l2 = get_field(s->fields, l2) + 1;
3438 /* The length must not exceed 32 bytes. */
3439 if (l2 > 32) {
3440 gen_program_exception(s, PGM_SPECIFICATION);
3441 return EXIT_NORETURN;
3443 l = tcg_const_i32(l2);
3444 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3445 tcg_temp_free_i32(l);
3449 static ExitStatus op_pku(DisasContext *s, DisasOps *o)
3451 int l2 = get_field(s->fields, l2) + 1;
3454 /* The length must be even and must not exceed 64 bytes. */
3455 if ((l2 & 1) || (l2 > 64)) {
3456 gen_program_exception(s, PGM_SPECIFICATION);
3457 return EXIT_NORETURN;
3459 l = tcg_const_i32(l2);
3460 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3461 tcg_temp_free_i32(l);
3465 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3467 gen_helper_popcnt(o->out, o->in2);
3471 #ifndef CONFIG_USER_ONLY
3472 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3474 check_privileged(s);
3475 gen_helper_ptlb(cpu_env);
3480 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3482 int i3 = get_field(s->fields, i3);
3483 int i4 = get_field(s->fields, i4);
3484 int i5 = get_field(s->fields, i5);
3485 int do_zero = i4 & 0x80;
3486 uint64_t mask, imask, pmask;
3487 int pos, len, rot;
3489 /* Adjust the arguments for the specific insn. */
3490 switch (s->fields->op2) {
3491 case 0x55: /* risbg */
3492 case 0x59: /* risbgn */
3493 i3 &= 63;
3494 i4 &= 63;
3495 pmask = ~0;
3496 break;
3497 case 0x5d: /* risbhg */
3498 i3 &= 31;
3499 i4 &= 31;
3500 pmask = 0xffffffff00000000ull;
3501 break;
3502 case 0x51: /* risblg */
3503 i3 &= 31;
3504 i4 &= 31;
3505 pmask = 0x00000000ffffffffull;
3506 break;
3507 default:
3508 g_assert_not_reached();
3511 /* MASK is the set of bits to be inserted from R2.
3512 Take care for I3/I4 wraparound. */
3513 mask = pmask >> i3;
3514 if (i3 <= i4) {
3515 mask ^= pmask >> i4 >> 1;
3516 } else {
3517 mask |= ~(pmask >> i4 >> 1);
3518 }
3519 mask &= pmask;
3521 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3522 insns, we need to keep the other half of the register. */
3523 imask = ~mask | ~pmask;
3524 if (do_zero) {
3525 imask = ~pmask;
3526 }
3528 len = i4 - i3 + 1;
3529 pos = 63 - i4;
3530 rot = i5 & 63;
3531 if (s->fields->op2 == 0x5d) {
3532 pos += 32;
3533 }
3535 /* In some cases we can implement this with extract. */
3536 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3537 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3541 /* In some cases we can implement this with deposit. */
3542 if (len > 0 && (imask == 0 || ~mask == imask)) {
3543 /* Note that we rotate the bits to be inserted to the lsb, not to
3544 the position as described in the PoO. */
3545 rot = (rot - pos) & 63;
3546 } else {
3547 pos = -1;
3548 }
3550 /* Rotate the input as necessary. */
3551 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3553 /* Insert the selected bits into the output. */
3554 if (pos >= 0) {
3555 if (imask == 0) {
3556 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3557 } else {
3558 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3559 }
3560 } else if (imask == 0) {
3561 tcg_gen_andi_i64(o->out, o->in2, mask);
3562 } else {
3563 tcg_gen_andi_i64(o->in2, o->in2, mask);
3564 tcg_gen_andi_i64(o->out, o->out, imask);
3565 tcg_gen_or_i64(o->out, o->out, o->in2);
3566 }
3570 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3572 int i3 = get_field(s->fields, i3);
3573 int i4 = get_field(s->fields, i4);
3574 int i5 = get_field(s->fields, i5);
3577 /* If this is a test-only form, arrange to discard the result. */
3578 if (i3 & 0x80) {
3579 o->out = tcg_temp_new_i64();
3580 o->g_out = false;
3581 }
3583 i3 &= 63;
3584 i4 &= 63;
3585 i5 &= 63;
3587 /* MASK is the set of bits to be operated on from R2.
3588 Take care for I3/I4 wraparound. */
3589 mask = ~0ull >> i3;
3590 if (i3 <= i4) {
3591 mask ^= ~0ull >> i4 >> 1;
3592 } else {
3593 mask |= ~(~0ull >> i4 >> 1);
3594 }
3596 /* Rotate the input as necessary. */
3597 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3600 switch (s->fields->op2) {
3601 case 0x55: /* AND */
3602 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3603 tcg_gen_and_i64(o->out, o->out, o->in2);
3604 break;
3605 case 0x56: /* OR */
3606 tcg_gen_andi_i64(o->in2, o->in2, mask);
3607 tcg_gen_or_i64(o->out, o->out, o->in2);
3608 break;
3609 case 0x57: /* XOR */
3610 tcg_gen_andi_i64(o->in2, o->in2, mask);
3611 tcg_gen_xor_i64(o->out, o->out, o->in2);
3612 break;
3613 default:
3614 abort();
3615 }
3617 /* Set the CC. */
3618 tcg_gen_andi_i64(cc_dst, o->out, mask);
3619 set_cc_nz_u64(s, cc_dst);
3623 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3625 tcg_gen_bswap16_i64(o->out, o->in2);
3629 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3631 tcg_gen_bswap32_i64(o->out, o->in2);
3635 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3637 tcg_gen_bswap64_i64(o->out, o->in2);
3641 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3643 TCGv_i32 t1 = tcg_temp_new_i32();
3644 TCGv_i32 t2 = tcg_temp_new_i32();
3645 TCGv_i32 to = tcg_temp_new_i32();
3646 tcg_gen_extrl_i64_i32(t1, o->in1);
3647 tcg_gen_extrl_i64_i32(t2, o->in2);
3648 tcg_gen_rotl_i32(to, t1, t2);
3649 tcg_gen_extu_i32_i64(o->out, to);
3650 tcg_temp_free_i32(t1);
3651 tcg_temp_free_i32(t2);
3652 tcg_temp_free_i32(to);
3656 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3658 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3662 #ifndef CONFIG_USER_ONLY
3663 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3665 check_privileged(s);
3666 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3671 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3673 check_privileged(s);
3674 gen_helper_sacf(cpu_env, o->in2);
3675 /* Addressing mode has changed, so end the block. */
3676 return EXIT_PC_STALE;
3680 static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3682 int sam = s->insn->data;
3683 TCGv_i64 tsam;
3684 uint64_t mask;
3686 switch (sam) {
3687 case 0:
3688 mask = 0xffffff;
3689 break;
3690 case 1:
3691 mask = 0x7fffffff;
3692 break;
3693 default:
3694 mask = -1;
3695 break;
3696 }
3698 /* Bizarre but true, we check the address of the current insn for the
3699 specification exception, not the next to be executed. Thus the PoO
3700 documents that Bad Things Happen two bytes before the end. */
3701 if (s->pc & ~mask) {
3702 gen_program_exception(s, PGM_SPECIFICATION);
3703 return EXIT_NORETURN;
3704 }
3705 s->next_pc &= mask;
3707 tsam = tcg_const_i64(sam);
3708 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3709 tcg_temp_free_i64(tsam);
3711 /* Always exit the TB, since we (may have) changed execution mode. */
3712 return EXIT_PC_STALE;
3715 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3717 int r1 = get_field(s->fields, r1);
3718 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3722 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3724 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3728 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3730 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3734 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3736 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3737 return_low128(o->out2);
3741 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3743 gen_helper_sqeb(o->out, cpu_env, o->in2);
3747 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3749 gen_helper_sqdb(o->out, cpu_env, o->in2);
3753 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3755 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3756 return_low128(o->out2);
3760 #ifndef CONFIG_USER_ONLY
3761 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3763 check_privileged(s);
3764 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3769 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3771 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3772 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3773 check_privileged(s);
3774 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
3776 tcg_temp_free_i32(r1);
3777 tcg_temp_free_i32(r3);
3782 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3789 disas_jcc(s, &c, get_field(s->fields, m3));
3791 /* We want to store when the condition is fulfilled, so branch
3792 out when it's not. */
3793 c.cond = tcg_invert_cond(c.cond);
3795 lab = gen_new_label();
3796 if (c.is_64) {
3797 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3798 } else {
3799 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3800 }
3801 free_compare(&c);
3803 r1 = get_field(s->fields, r1);
3804 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3805 switch (s->insn->data) {
3806 case 1: /* STOCG */
3807 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3808 break;
3809 case 0: /* STOC */
3810 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3811 break;
3812 case 2: /* STOCFH */
3813 h = tcg_temp_new_i64();
3814 tcg_gen_shri_i64(h, regs[r1], 32);
3815 tcg_gen_qemu_st32(h, a, get_mem_index(s));
3816 tcg_temp_free_i64(h);
3817 break;
3818 default:
3819 g_assert_not_reached();
3820 }
3821 tcg_temp_free_i64(a);
3823 gen_set_label(lab);
3824 return NO_EXIT;
3827 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3829 uint64_t sign = 1ull << s->insn->data;
3830 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3831 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3832 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3833 /* The arithmetic left shift is curious in that it does not affect
3834 the sign bit. Copy that over from the source unchanged. */
3835 tcg_gen_andi_i64(o->out, o->out, ~sign);
3836 tcg_gen_andi_i64(o->in1, o->in1, sign);
3837 tcg_gen_or_i64(o->out, o->out, o->in1);
3841 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3843 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3847 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3849 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3853 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3855 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3859 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3861 gen_helper_sfpc(cpu_env, o->in2);
3865 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3867 gen_helper_sfas(cpu_env, o->in2);
3871 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3873 int b2 = get_field(s->fields, b2);
3874 int d2 = get_field(s->fields, d2);
3875 TCGv_i64 t1 = tcg_temp_new_i64();
3876 TCGv_i64 t2 = tcg_temp_new_i64();
3877 int mask, pos, len;
3879 switch (s->fields->op2) {
3880 case 0x99: /* SRNM */
3881 pos = 0, len = 2;
3882 break;
3883 case 0xb8: /* SRNMB */
3884 pos = 0, len = 3;
3885 break;
3886 case 0xb9: /* SRNMT */
3887 pos = 4, len = 3;
3888 break;
3889 default:
3890 g_assert_not_reached();
3891 }
3892 mask = (1 << len) - 1;
3894 /* Insert the value into the appropriate field of the FPC. */
3895 if (b2 == 0) {
3896 tcg_gen_movi_i64(t1, d2 & mask);
3897 } else {
3898 tcg_gen_addi_i64(t1, regs[b2], d2);
3899 tcg_gen_andi_i64(t1, t1, mask);
3900 }
3901 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3902 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3903 tcg_temp_free_i64(t1);
3905 /* Then install the new FPC to set the rounding mode in fpu_status. */
3906 gen_helper_sfpc(cpu_env, t2);
3907 tcg_temp_free_i64(t2);
3911 static ExitStatus op_spm(DisasContext *s, DisasOps *o)
3913 tcg_gen_extrl_i64_i32(cc_op, o->in1);
3914 tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
3917 tcg_gen_shri_i64(o->in1, o->in1, 24);
3918 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
3922 static ExitStatus op_ectg(DisasContext *s, DisasOps *o)
3924 int b1 = get_field(s->fields, b1);
3925 int d1 = get_field(s->fields, d1);
3926 int b2 = get_field(s->fields, b2);
3927 int d2 = get_field(s->fields, d2);
3928 int r3 = get_field(s->fields, r3);
3929 TCGv_i64 tmp = tcg_temp_new_i64();
3931 /* fetch all operands first */
3932 o->in1 = tcg_temp_new_i64();
3933 tcg_gen_addi_i64(o->in1, regs[b1], d1);
3934 o->in2 = tcg_temp_new_i64();
3935 tcg_gen_addi_i64(o->in2, regs[b2], d2);
3936 o->addr1 = get_address(s, 0, r3, 0);
3938 /* load the third operand into r3 before modifying anything */
3939 tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
3941 /* subtract CPU timer from first operand and store in GR0 */
3942 gen_helper_stpt(tmp, cpu_env);
3943 tcg_gen_sub_i64(regs[0], o->in1, tmp);
3945 /* store second operand in GR1 */
3946 tcg_gen_mov_i64(regs[1], o->in2);
3948 tcg_temp_free_i64(tmp);
3952 #ifndef CONFIG_USER_ONLY
3953 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3955 check_privileged(s);
3956 tcg_gen_shri_i64(o->in2, o->in2, 4);
3957 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
3961 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3963 check_privileged(s);
3964 gen_helper_sske(cpu_env, o->in1, o->in2);
3968 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3970 check_privileged(s);
3971 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3972 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3973 return EXIT_PC_STALE_NOCHAIN;
3976 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3978 check_privileged(s);
3979 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
3983 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3985 gen_helper_stck(o->out, cpu_env);
3986 /* ??? We don't implement clock states. */
3987 gen_op_movi_cc(s, 0);
3991 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3993 TCGv_i64 c1 = tcg_temp_new_i64();
3994 TCGv_i64 c2 = tcg_temp_new_i64();
3995 TCGv_i64 todpr = tcg_temp_new_i64();
3996 gen_helper_stck(c1, cpu_env);
3997 /* 16-bit value stored in a uint32_t (only the valid bits are set) */
3998 tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
3999 /* Shift the 64-bit value into its place as a zero-extended
4000 104-bit value. Note that "bit positions 64-103 are always
4001 non-zero so that they compare differently to STCK"; we set
4002 the least significant bit to 1. */
4003 tcg_gen_shli_i64(c2, c1, 56);
4004 tcg_gen_shri_i64(c1, c1, 8);
4005 tcg_gen_ori_i64(c2, c2, 0x10000);
4006 tcg_gen_or_i64(c2, c2, todpr);
4007 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4008 tcg_gen_addi_i64(o->in2, o->in2, 8);
4009 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4010 tcg_temp_free_i64(c1);
4011 tcg_temp_free_i64(c2);
4012 tcg_temp_free_i64(todpr);
4013 /* ??? We don't implement clock states. */
4014 gen_op_movi_cc(s, 0);
4018 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
4020 check_privileged(s);
4021 gen_helper_sckc(cpu_env, o->in2);
4025 static ExitStatus op_sckpf(DisasContext *s, DisasOps *o)
4027 check_privileged(s);
4028 gen_helper_sckpf(cpu_env, regs[0]);
4032 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
4034 check_privileged(s);
4035 gen_helper_stckc(o->out, cpu_env);
4039 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
4041 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4042 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4043 check_privileged(s);
4044 gen_helper_stctg(cpu_env, r1, o->in2, r3);
4045 tcg_temp_free_i32(r1);
4046 tcg_temp_free_i32(r3);
4050 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
4052 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4053 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4054 check_privileged(s);
4055 gen_helper_stctl(cpu_env, r1, o->in2, r3);
4056 tcg_temp_free_i32(r1);
4057 tcg_temp_free_i32(r3);
4061 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
4063 check_privileged(s);
4064 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4068 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
4070 check_privileged(s);
4071 gen_helper_spt(cpu_env, o->in2);
4075 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
4077 check_privileged(s);
4078 gen_helper_stfl(cpu_env);
4082 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
4084 check_privileged(s);
4085 gen_helper_stpt(o->out, cpu_env);
4089 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
4091 check_privileged(s);
4092 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4097 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
4099 check_privileged(s);
4100 gen_helper_spx(cpu_env, o->in2);
4104 static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
4106 check_privileged(s);
4107 gen_helper_xsch(cpu_env, regs[1]);
4112 static ExitStatus op_csch(DisasContext *s, DisasOps *o)
4114 check_privileged(s);
4115 gen_helper_csch(cpu_env, regs[1]);
4120 static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
4122 check_privileged(s);
4123 gen_helper_hsch(cpu_env, regs[1]);
4128 static ExitStatus op_msch(DisasContext *s, DisasOps *o)
4130 check_privileged(s);
4131 gen_helper_msch(cpu_env, regs[1], o->in2);
4136 static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
4138 check_privileged(s);
4139 gen_helper_rchp(cpu_env, regs[1]);
4144 static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
4146 check_privileged(s);
4147 gen_helper_rsch(cpu_env, regs[1]);
4152 static ExitStatus op_sal(DisasContext *s, DisasOps *o)
4154 check_privileged(s);
4155 gen_helper_sal(cpu_env, regs[1]);
4159 static ExitStatus op_schm(DisasContext *s, DisasOps *o)
4161 check_privileged(s);
4162 gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4166 static ExitStatus op_siga(DisasContext *s, DisasOps *o)
4168 check_privileged(s);
4169 /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4170 gen_op_movi_cc(s, 3);
4174 static ExitStatus op_stcps(DisasContext *s, DisasOps *o)
4176 check_privileged(s);
4177 /* The instruction is suppressed if not provided. */
4181 static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
4183 check_privileged(s);
4184 gen_helper_ssch(cpu_env, regs[1], o->in2);
4189 static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
4191 check_privileged(s);
4192 gen_helper_stsch(cpu_env, regs[1], o->in2);
4197 static ExitStatus op_stcrw(DisasContext *s, DisasOps *o)
4199 check_privileged(s);
4200 gen_helper_stcrw(cpu_env, o->in2);
4205 static ExitStatus op_tpi(DisasContext *s, DisasOps *o)
4207 check_privileged(s);
4208 gen_helper_tpi(cc_op, cpu_env, o->addr1);
4213 static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
4215 check_privileged(s);
4216 gen_helper_tsch(cpu_env, regs[1], o->in2);
4221 static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
4223 check_privileged(s);
4224 gen_helper_chsc(cpu_env, o->in2);
4229 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
4231 check_privileged(s);
4232 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4233 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4237 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
4239 uint64_t i2 = get_field(s->fields, i2);
4242 check_privileged(s);
4244 /* It is important to do what the instruction name says: STORE THEN.
4245 If we let the output hook perform the store then if we fault and
4246 restart, we'll have the wrong SYSTEM MASK in place. */
4247 t = tcg_temp_new_i64();
4248 tcg_gen_shri_i64(t, psw_mask, 56);
4249 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4250 tcg_temp_free_i64(t);
4252 if (s->fields->op == 0xac) {
4253 tcg_gen_andi_i64(psw_mask, psw_mask,
4254 (i2 << 56) | 0x00ffffffffffffffull);
4256 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4259 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4260 return EXIT_PC_STALE_NOCHAIN;
4263 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
4265 check_privileged(s);
4266 gen_helper_stura(cpu_env, o->in2, o->in1);
4270 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
4272 check_privileged(s);
4273 gen_helper_sturg(cpu_env, o->in2, o->in1);
4278 static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
4280 gen_helper_stfle(cc_op, cpu_env, o->in2);
4285 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
4287 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4291 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
4293 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4297 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
4299 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4303 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
4305 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4309 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
4311 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4312 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4313 gen_helper_stam(cpu_env, r1, o->in2, r3);
4314 tcg_temp_free_i32(r1);
4315 tcg_temp_free_i32(r3);
4319 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
4321 int m3 = get_field(s->fields, m3);
4322 int pos, base = s->insn->data;
4323 TCGv_i64 tmp = tcg_temp_new_i64();
4325 pos = base + ctz32(m3) * 8;
4326 switch (m3) {
4327 case 0xf:
4328 /* Effectively a 32-bit store. */
4329 tcg_gen_shri_i64(tmp, o->in1, pos);
4330 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4331 break;
4333 case 0xc:
4334 case 0x6:
4335 case 0x3:
4336 /* Effectively a 16-bit store. */
4337 tcg_gen_shri_i64(tmp, o->in1, pos);
4338 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4339 break;
4341 case 0x8:
4342 case 0x4:
4343 case 0x2:
4344 case 0x1:
4345 /* Effectively an 8-bit store. */
4346 tcg_gen_shri_i64(tmp, o->in1, pos);
4347 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4348 break;
4350 default:
4351 /* This is going to be a sequence of shifts and stores. */
4352 pos = base + 32 - 8;
4353 while (m3) {
4354 if (m3 & 0x8) {
4355 tcg_gen_shri_i64(tmp, o->in1, pos);
4356 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4357 tcg_gen_addi_i64(o->in2, o->in2, 1);
4358 }
4359 m3 = (m3 << 1) & 0xf;
4360 pos -= 8;
4361 }
4362 break;
4363 }
4364 tcg_temp_free_i64(tmp);
4368 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
4370 int r1 = get_field(s->fields, r1);
4371 int r3 = get_field(s->fields, r3);
4372 int size = s->insn->data;
4373 TCGv_i64 tsize = tcg_const_i64(size);
4375 while (1) {
4376 if (size == 8) {
4377 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4378 } else {
4379 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4380 }
4381 if (r1 == r3) {
4382 break;
4383 }
4384 tcg_gen_add_i64(o->in2, o->in2, tsize);
4385 r1 = (r1 + 1) & 15;
4386 }
4388 tcg_temp_free_i64(tsize);
4392 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
4394 int r1 = get_field(s->fields, r1);
4395 int r3 = get_field(s->fields, r3);
4396 TCGv_i64 t = tcg_temp_new_i64();
4397 TCGv_i64 t4 = tcg_const_i64(4);
4398 TCGv_i64 t32 = tcg_const_i64(32);
4400 while (1) {
4401 tcg_gen_shl_i64(t, regs[r1], t32);
4402 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4403 if (r1 == r3) {
4404 break;
4405 }
4406 tcg_gen_add_i64(o->in2, o->in2, t4);
4407 r1 = (r1 + 1) & 15;
4408 }
4410 tcg_temp_free_i64(t);
4411 tcg_temp_free_i64(t4);
4412 tcg_temp_free_i64(t32);
4416 static ExitStatus op_stpq(DisasContext *s, DisasOps *o)
4418 if (tb_cflags(s->tb) & CF_PARALLEL) {
4419 gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4421 gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4426 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
4428 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4429 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4431 gen_helper_srst(cpu_env, r1, r2);
4433 tcg_temp_free_i32(r1);
4434 tcg_temp_free_i32(r2);
4439 static ExitStatus op_srstu(DisasContext *s, DisasOps *o)
4441 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4442 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4444 gen_helper_srstu(cpu_env, r1, r2);
4446 tcg_temp_free_i32(r1);
4447 tcg_temp_free_i32(r2);
4452 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
4454 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4458 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
4460 DisasCompare cmp;
4461 TCGv_i64 borrow;
4463 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4465 /* The !borrow flag is the msb of CC. Since we want the inverse of
4466 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4467 disas_jcc(s, &cmp, 8 | 4);
4468 borrow = tcg_temp_new_i64();
4469 if (cmp.is_64) {
4470 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4471 } else {
4472 TCGv_i32 t = tcg_temp_new_i32();
4473 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4474 tcg_gen_extu_i32_i64(borrow, t);
4475 tcg_temp_free_i32(t);
4476 }
4477 free_compare(&cmp);
4479 tcg_gen_sub_i64(o->out, o->out, borrow);
4480 tcg_temp_free_i64(borrow);
4484 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
4491 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4492 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4493 tcg_temp_free_i32(t);
4495 t = tcg_const_i32(s->ilen);
4496 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4497 tcg_temp_free_i32(t);
4499 gen_exception(EXCP_SVC);
4500 return EXIT_NORETURN;
4503 static ExitStatus op_tam(DisasContext *s, DisasOps *o)
4507 cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
4508 cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
4509 gen_op_movi_cc(s, cc);
4513 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
4515 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4520 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
4522 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4527 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
4529 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4534 #ifndef CONFIG_USER_ONLY
4536 static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
4538 check_privileged(s);
4539 gen_helper_testblock(cc_op, cpu_env, o->in2);
4544 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
4546 gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4553 static ExitStatus op_tp(DisasContext *s, DisasOps *o)
4555 TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4556 gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4557 tcg_temp_free_i32(l1);
4562 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
4564 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4565 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4566 tcg_temp_free_i32(l);
4571 static ExitStatus op_tre(DisasContext *s, DisasOps *o)
4573 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4574 return_low128(o->out2);
4579 static ExitStatus op_trt(DisasContext *s, DisasOps *o)
4581 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4582 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4583 tcg_temp_free_i32(l);
4588 static ExitStatus op_trtr(DisasContext *s, DisasOps *o)
4590 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4591 gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4592 tcg_temp_free_i32(l);
4597 static ExitStatus op_trXX(DisasContext *s, DisasOps *o)
4599 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4600 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4601 TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4602 TCGv_i32 tst = tcg_temp_new_i32();
4603 int m3 = get_field(s->fields, m3);
4605 if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4606 m3 = 0;
4607 }
4608 if (m3 & 1) {
4609 tcg_gen_movi_i32(tst, -1);
4610 } else {
4611 tcg_gen_extrl_i64_i32(tst, regs[0]);
4612 if (s->insn->opc & 3) {
4613 tcg_gen_ext8u_i32(tst, tst);
4614 } else {
4615 tcg_gen_ext16u_i32(tst, tst);
4616 }
4617 }
4618 gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4620 tcg_temp_free_i32(r1);
4621 tcg_temp_free_i32(r2);
4622 tcg_temp_free_i32(sizes);
4623 tcg_temp_free_i32(tst);
4628 static ExitStatus op_ts(DisasContext *s, DisasOps *o)
4630 TCGv_i32 t1 = tcg_const_i32(0xff);
4631 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4632 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4633 tcg_temp_free_i32(t1);
4638 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
4640 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4641 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4642 tcg_temp_free_i32(l);
4646 static ExitStatus op_unpka(DisasContext *s, DisasOps *o)
4648 int l1 = get_field(s->fields, l1) + 1;
4651 /* The length must not exceed 32 bytes. */
4652 if (l1 > 32) {
4653 gen_program_exception(s, PGM_SPECIFICATION);
4654 return EXIT_NORETURN;
4656 l = tcg_const_i32(l1);
4657 gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4658 tcg_temp_free_i32(l);
4663 static ExitStatus op_unpku(DisasContext *s, DisasOps *o)
4665 int l1 = get_field(s->fields, l1) + 1;
4668 /* The length must be even and must not exceed 64 bytes. */
4669 if ((l1 & 1) || (l1 > 64)) {
4670 gen_program_exception(s, PGM_SPECIFICATION);
4671 return EXIT_NORETURN;
4673 l = tcg_const_i32(l1);
4674 gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4675 tcg_temp_free_i32(l);
4681 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
4683 int d1 = get_field(s->fields, d1);
4684 int d2 = get_field(s->fields, d2);
4685 int b1 = get_field(s->fields, b1);
4686 int b2 = get_field(s->fields, b2);
4687 int l = get_field(s->fields, l1);
4690 o->addr1 = get_address(s, 0, b1, d1);
4692 /* If the addresses are identical, this is a store/memset of zero. */
4693 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4694 o->in2 = tcg_const_i64(0);
4698 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4701 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4705 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4708 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4712 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4715 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4719 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4721 gen_op_movi_cc(s, 0);
4725 /* But in general we'll defer to a helper. */
4726 o->in2 = get_address(s, 0, b2, d2);
4727 t32 = tcg_const_i32(l);
4728 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4729 tcg_temp_free_i32(t32);
4734 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4736 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4740 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4742 int shift = s->insn->data & 0xff;
4743 int size = s->insn->data >> 8;
4744 uint64_t mask = ((1ull << size) - 1) << shift;
4747 tcg_gen_shli_i64(o->in2, o->in2, shift);
4748 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4750 /* Produce the CC from only the bits manipulated. */
4751 tcg_gen_andi_i64(cc_dst, o->out, mask);
4752 set_cc_nz_u64(s, cc_dst);
4756 static ExitStatus op_xi(DisasContext *s, DisasOps *o)
4758 o->in1 = tcg_temp_new_i64();
4760 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4761 tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4762 } else {
4763 /* Perform the atomic operation in memory. */
4764 tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
4765 s->insn->data);
4766 }
4768 /* Recompute also for atomic case: needed for setting CC. */
4769 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4771 if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4772 tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
4777 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4779 o->out = tcg_const_i64(0);
4783 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4785 o->out = tcg_const_i64(0);
4786 o->out2 = o->out;
4787 o->g_out2 = true;
4791 #ifndef CONFIG_USER_ONLY
4792 static ExitStatus op_clp(DisasContext *s, DisasOps *o)
4794 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4796 check_privileged(s);
4797 gen_helper_clp(cpu_env, r2);
4798 tcg_temp_free_i32(r2);
4803 static ExitStatus op_pcilg(DisasContext *s, DisasOps *o)
4805 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4806 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4808 check_privileged(s);
4809 gen_helper_pcilg(cpu_env, r1, r2);
4810 tcg_temp_free_i32(r1);
4811 tcg_temp_free_i32(r2);
4816 static ExitStatus op_pcistg(DisasContext *s, DisasOps *o)
4818 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4819 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4821 check_privileged(s);
4822 gen_helper_pcistg(cpu_env, r1, r2);
4823 tcg_temp_free_i32(r1);
4824 tcg_temp_free_i32(r2);
4829 static ExitStatus op_stpcifc(DisasContext *s, DisasOps *o)
4831 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4832 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4834 check_privileged(s);
4835 gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
4836 tcg_temp_free_i32(ar);
4837 tcg_temp_free_i32(r1);
4842 static ExitStatus op_sic(DisasContext *s, DisasOps *o)
4844 check_privileged(s);
4845 gen_helper_sic(cpu_env, o->in1, o->in2);
4849 static ExitStatus op_rpcit(DisasContext *s, DisasOps *o)
4851 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4852 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4854 check_privileged(s);
4855 gen_helper_rpcit(cpu_env, r1, r2);
4856 tcg_temp_free_i32(r1);
4857 tcg_temp_free_i32(r2);
4862 static ExitStatus op_pcistb(DisasContext *s, DisasOps *o)
4864 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4865 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4866 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4868 check_privileged(s);
4869 gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
4870 tcg_temp_free_i32(ar);
4871 tcg_temp_free_i32(r1);
4872 tcg_temp_free_i32(r3);
4877 static ExitStatus op_mpcifc(DisasContext *s, DisasOps *o)
4879 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4880 TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4882 check_privileged(s);
4883 gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
4884 tcg_temp_free_i32(ar);
4885 tcg_temp_free_i32(r1);
4891 /* ====================================================================== */
4892 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4893 the original inputs), update the various cc data structures in order to
4894 be able to compute the new condition code. */
4896 static void cout_abs32(DisasContext *s, DisasOps *o)
4898 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4901 static void cout_abs64(DisasContext *s, DisasOps *o)
4903 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4906 static void cout_adds32(DisasContext *s, DisasOps *o)
4908 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4911 static void cout_adds64(DisasContext *s, DisasOps *o)
4913 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4916 static void cout_addu32(DisasContext *s, DisasOps *o)
4918 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4921 static void cout_addu64(DisasContext *s, DisasOps *o)
4923 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4926 static void cout_addc32(DisasContext *s, DisasOps *o)
4928 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4931 static void cout_addc64(DisasContext *s, DisasOps *o)
4933 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4936 static void cout_cmps32(DisasContext *s, DisasOps *o)
4938 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4941 static void cout_cmps64(DisasContext *s, DisasOps *o)
4943 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4946 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4948 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4951 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4953 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4956 static void cout_f32(DisasContext *s, DisasOps *o)
4958 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4961 static void cout_f64(DisasContext *s, DisasOps *o)
4963 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4966 static void cout_f128(DisasContext *s, DisasOps *o)
4968 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4971 static void cout_nabs32(DisasContext *s, DisasOps *o)
4973 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4976 static void cout_nabs64(DisasContext *s, DisasOps *o)
4978 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4981 static void cout_neg32(DisasContext *s, DisasOps *o)
4983 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4986 static void cout_neg64(DisasContext *s, DisasOps *o)
4988 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4991 static void cout_nz32(DisasContext *s, DisasOps *o)
4993 tcg_gen_ext32u_i64(cc_dst, o->out);
4994 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4997 static void cout_nz64(DisasContext *s, DisasOps *o)
4999 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5002 static void cout_s32(DisasContext *s, DisasOps *o)
5004 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5007 static void cout_s64(DisasContext *s, DisasOps *o)
5009 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5012 static void cout_subs32(DisasContext *s, DisasOps *o)
5014 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5017 static void cout_subs64(DisasContext *s, DisasOps *o)
5019 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5022 static void cout_subu32(DisasContext *s, DisasOps *o)
5024 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
5027 static void cout_subu64(DisasContext *s, DisasOps *o)
5029 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
5032 static void cout_subb32(DisasContext *s, DisasOps *o)
5034 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
5037 static void cout_subb64(DisasContext *s, DisasOps *o)
5039 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
5042 static void cout_tm32(DisasContext *s, DisasOps *o)
5044 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5047 static void cout_tm64(DisasContext *s, DisasOps *o)
5049 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5052 /* ====================================================================== */
5053 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
5054 with the TCG register to which we will write. Used in combination with
5055 the "wout" generators, in some cases we need a new temporary, and in
5056 some cases we can write to a TCG global. */
5058 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
5060 o->out = tcg_temp_new_i64();
5062 #define SPEC_prep_new 0
5064 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
5066 o->out = tcg_temp_new_i64();
5067 o->out2 = tcg_temp_new_i64();
5069 #define SPEC_prep_new_P 0
5071 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5073 o->out = regs[get_field(f, r1)];
5076 #define SPEC_prep_r1 0
5078 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
5080 int r1 = get_field(f, r1);
5082 o->out2 = regs[r1 + 1];
5083 o->g_out = o->g_out2 = true;
5085 #define SPEC_prep_r1_P SPEC_r1_even
5087 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5089 o->out = fregs[get_field(f, r1)];
5092 #define SPEC_prep_f1 0
5094 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5096 int r1 = get_field(f, r1);
5098 o->out2 = fregs[r1 + 2];
5099 o->g_out = o->g_out2 = true;
5101 #define SPEC_prep_x1 SPEC_r1_f128
5103 /* ====================================================================== */
5104 /* The "Write OUTput" generators. These generally perform some non-trivial
5105 copy of data to TCG globals, or to main memory. The trivial cases are
5106 generally handled by having a "prep" generator install the TCG global
5107 as the destination of the operation. */
5109 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5111 store_reg(get_field(f, r1), o->out);
5113 #define SPEC_wout_r1 0
5115 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5117 int r1 = get_field(f, r1);
5118 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5120 #define SPEC_wout_r1_8 0
5122 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5124 int r1 = get_field(f, r1);
5125 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5127 #define SPEC_wout_r1_16 0
5129 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5131 store_reg32_i64(get_field(f, r1), o->out);
5133 #define SPEC_wout_r1_32 0
5135 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
5137 store_reg32h_i64(get_field(f, r1), o->out);
5139 #define SPEC_wout_r1_32h 0
5141 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5143 int r1 = get_field(f, r1);
5144 store_reg32_i64(r1, o->out);
5145 store_reg32_i64(r1 + 1, o->out2);
5147 #define SPEC_wout_r1_P32 SPEC_r1_even
5149 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5151 int r1 = get_field(f, r1);
5152 store_reg32_i64(r1 + 1, o->out);
5153 tcg_gen_shri_i64(o->out, o->out, 32);
5154 store_reg32_i64(r1, o->out);
5156 #define SPEC_wout_r1_D32 SPEC_r1_even
static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif
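/* Note: the *_a variants above exist only in the softmmu build and add
   MO_ALIGN, so TCG checks the effective address and raises the target's
   unaligned-access exception instead of performing the store. */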
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand of an insn.  */

static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0
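/* Note: the _o ("original") variants here and below hand out the global
   TCG value of the register itself rather than a copy, and set the
   corresponding g_* flag so translate_one will not free it as a
   temporary. */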
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand of an insn.  */

static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0
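/* Note: relative-addressing immediates count halfwords, which is why
   in2_ri2 scales the sign-extended I2 field by 2 before adding it to
   the current PC. */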
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0
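/* Note (help_l2_shift is defined earlier in this file): its final
   argument serves as a mask on the computed shift amount, so only the
   low 5 bits (32-bit shifts) or low 6 bits (64-bit shifts) of the
   second-operand address take effect. */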
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */
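/* To illustrate the triple expansion (this example entry is
   hypothetical): a line like
       C(0x5a, A, RX_a, Z, r1, m2_32s, new, r1_32, add, adds32)
   in insn-data.def becomes "insn_A," in the enum below, then a
   DisasInsn initializer via the second D() definition, and finally
   "case 0x5a: return &insn_info[insn_A];" inside lookup_opc. */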
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                      \
    .opc = OPC,                                                            \
    .fmt = FMT_##FT,                                                       \
    .fac = FAC_##FC,                                                       \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
    .name = #NM,                                                           \
    .help_in1 = in1_##I1,                                                  \
    .help_in2 = in2_##I2,                                                  \
    .help_prep = prep_##P,                                                 \
    .help_wout = wout_##W,                                                 \
    .help_cout = cout_##CC,                                                \
    .help_op = op_##OP,                                                    \
    .data = D                                                              \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0   NULL
#define in2_0   NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0    NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
/* Give smaller names to the various facilities.  */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
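/* Worked example (illustrative): an RR-format R1 field occupies
   big-endian bits 8-11, so f->beg = 8 and f->size = 4, and the
   extraction below computes r = (insn << 8) >> 60, leaving exactly
   those four bits. */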
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
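/* Note on the type-2 case above: the 20-bit displacement is encoded
   with its low 12 bits (DL) before its high 8 bits (DH), so a single
   contiguous extraction yields DL:DH.  ((int8_t)r << 12) | (r >> 8)
   sign-extends DH and swaps the halves back into a signed DH:DL
   value. */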
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->next_pc = s->pc + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
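    /* Illustration: a 2-byte insn was left-aligned into the top 16 bits
       above, so bits 40-47 are zero and the default case below yields
       op2 = (insn << 40) >> 56 = 0, exactly as promised. */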
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->pc);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers.  */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    o.out = NULL;
    o.out2 = NULL;
    o.in1 = NULL;
    o.in2 = NULL;
    o.addr1 = NULL;

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    int num_insns, max_insns;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    dc.ex_value = tb->cs_base;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    do {
        tcg_gen_insn_start(dc.pc, dc.cc_op);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
            status = EXIT_PC_STALE;
            do_debug = true;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc.pc += 2;
            break;
        }

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        status = translate_one(env, &dc);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || (tb_cflags(tb) & CF_LAST_IO)
                || cs->singlestep_enabled
                || dc.ex_value)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb_cflags(tb) & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
    case EXIT_PC_STALE_NOCHAIN:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* FALLTHRU */
    case EXIT_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(&dc) || status == EXIT_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }

    gen_tb_end(tb, num_insns);

    tb->size = dc.pc - pc_start;
    tb->icount = num_insns;

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        if (unlikely(dc.ex_value)) {
            /* ??? Unfortunately log_target_disas can't use host memory.  */
            qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
        } else {
            qemu_log("IN: %s\n", lookup_symbol(pc_start));
            log_target_disas(cs, pc_start, dc.pc - pc_start);
            qemu_log("\n");
        }
        qemu_log_unlock();
    }
#endif
}
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];

    env->psw.addr = data[0];
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}