 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"

/* global register indexes */
static TCGv_env cpu_env;

#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    struct TranslationBlock *tb;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t ex_value;
    uint64_t pc, next_pc;
    uint32_t ilen;
    enum cc_op cc_op;
    bool singlestep_enabled;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
{
    if (!(s->tb->flags & FLAG_MASK_64)) {
        if (s->tb->flags & FLAG_MASK_32) {
            return pc | 0x80000000;
        }
    }
    return pc;
}

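/* Editor's note: in 24- and 31-bit addressing modes the architecture folds
   the current addressing mode into the link data stored by BAS/BASR/BRAS --
   bit 32 (the high bit of the 31-bit address word) is 1 in 31-bit mode.
   That is why the 31-bit case above ORs 0x80000000 into the address.  */
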
void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    int i;

    if (env->cc_op > 3) {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
    } else {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
                    env->psw.mask, env->psw.addr, env->cc_op);
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
                    env->vregs[i][0].ll, env->vregs[i][1].ll);
        cpu_fprintf(f, (i % 2) ? "\n" : " ");
    }

#ifndef CONFIG_USER_ONLY
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
#endif

#ifdef DEBUG_INLINE_BRANCHES
    for (i = 0; i < CC_OP_MAX; i++) {
        cpu_fprintf(f, "  %15s = %10ld\t%10ld\n", cc_name(i),
                    inline_branch_miss[i], inline_branch_hit[i]);
    }
#endif

    cpu_fprintf(f, "\n");
}

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;
    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->pc);

    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->pc);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->pc);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->pc);
}

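/* Editor's note: "gbea" mirrors the guest's breaking-event-address register.
   The architecture requires it to hold the address of the last instruction
   that broke sequential execution, which is why the branch helpers above
   record s->pc into it before (or instead of) raising a PER branch event.  */
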
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return 0;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return 1;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return 2;
    default:
        tcg_abort();
        break;
    }
}

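/* Editor's note: the value returned here is the MMU index handed to the
   tcg_gen_qemu_ld/st calls below; each PSW address-space-control mode
   (primary, secondary, home) gets its own softmmu TLB so translations for
   the different address spaces never mix.  */
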
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_trap(DisasContext *s)
{
    TCGv_i32 t;

    /* Set DXC to 0xff.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    gen_program_exception(s, PGM_DATA);
}

#ifndef CONFIG_USER_ONLY
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & FLAG_MASK_PSTATE) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}

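/* Editor's worked example: for a long-displacement load such as
   "LY %r1,-4(%r0,%r7)" in 31-bit mode, b2 = 7, x2 = 0, d2 = -4, so the code
   above emits
       tcg_gen_addi_i64(tmp, regs[7], -4);
       tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
   keeping the signed 20-bit displacement intact and applying the 31-bit
   address wrap only once, on the final sum.  */
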
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

static bool use_exit_tb(DisasContext *s)
{
    return (s->singlestep_enabled ||
            (s->tb->cflags & CF_LAST_IO) ||
            (s->tb->flags & FLAG_MASK_PER));
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

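/* Editor's note: direct TB chaining patches a jump inside the generated
   code, so a chained destination must lie on the same guest page as the
   translation block itself -- otherwise unmapping or rewriting that page
   could leave a stale direct jump behind.  Hence the TARGET_PAGE_MASK
   checks above; user-mode emulation has no such constraint.  */
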
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

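/* Editor's worked example: branch-mask bits select CCs msb-first
   (8 -> CC0 "equal", 4 -> CC1 "low", 2 -> CC2 "high", 1 -> CC3).  So after
   a signed COMPARE, mask 8|4 = 12 indexes entries 12-13 above and yields
   TCG_COND_LE: "BRC 12,..." becomes a single brcond(LE, op1, op2) with no
   need to materialize the CC.  */
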
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

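/* Editor's worked example: after a logical op (AND, OR, XOR) only CC0
   (result zero) and CC1 (result nonzero) can occur, so mask 8 selects
   "result == 0" (TCG_COND_EQ), masks with bit 4 but not bit 8 select
   "result != 0" (TCG_COND_NE), and a mask selecting both degenerates to an
   unconditional branch.  */
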
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as don't-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5

/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

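/* Editor's sketch of how these bits are consumed (the check itself lives in
   the decoder, outside this excerpt).  Assuming the insn table carries a
   "spec" byte, the decoder does roughly:

       if (insn->spec & SPEC_r1_even) {
           if (get_field(&f, r1) & 1) {
               gen_program_exception(s, PGM_SPECIFICATION);
           }
       }

   i.e. an even register number is demanded wherever the instruction
   addresses a 128-bit value as an even/odd register pair.  */
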
/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We have updated the PC and CC values.  */
    EXIT_PC_CC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are exiting the TB to the main loop.  */
    EXIT_PC_STALE_NOCHAIN,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

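/* Editor's note: the decoder (outside this excerpt) drives these hooks in a
   fixed pipeline: help_in1/help_in2 load the operands, help_prep allocates
   or aliases the outputs, help_op emits the actual operation, then
   help_wout writes the results back and help_cout computes the condition
   code.  Any hook may be NULL, in which case that stage is skipped.  */
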
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        per_branch(s, true);
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    }
}

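/* Editor's note: tcg_gen_exit_tb((uintptr_t)s->tb + n) returns to the main
   loop with the TB pointer tagged by chain-slot index n (0 or 1); that tag
   is what lets the loop patch this exit to jump straight into the next TB
   on subsequent executions, whereas a plain exit_tb(0) leaves the exit
   unchained.  */
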
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}

static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}

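/* Editor's worked example: ADD LOGICAL encodes CC as 0/1 = no carry
   (zero/nonzero result) and 2/3 = carry (zero/nonzero result).  Since
   branch-mask bits select CCs msb-first (8->CC0, 4->CC1, 2->CC2, 1->CC3),
   mask 3 selects exactly CC2|CC3, i.e. "carry set" -- and the setcond on
   the comparison disas_jcc() builds for that mask is the incoming carry
   that op_addc adds into the sum.  */
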
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}

static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return NO_EXIT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}

static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}

static ExitStatus op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}

static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return NO_EXIT;
}

static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}

#ifndef CONFIG_USER_ONLY
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGMemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    check_privileged(s);
    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    set_cc_static(s);
    return NO_EXIT;
}
#endif

static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}

static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return NO_EXIT;
}

#ifndef CONFIG_USER_ONLY
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    update_psw_addr(s);
    gen_op_calc_cc(s);

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif

static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}

static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}

static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}

static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return EXIT_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return EXIT_PC_CC_UPDATED;
}

static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}

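/* Editor's worked example: for in2 = 0x0000000000000010, cc_dst holds the
   input, out = clz = 59, the constant 0x8000000000000000 >> 59 recovers the
   found bit 0x10, and the final andc leaves out2 = 0x10 & ~0x10 = 0 -- the
   input with its leftmost one-bit cleared, exactly what FLOGR defines for
   R1+1.  For in2 = 0, the andc against cc_dst = 0 yields 0 regardless of
   the undefined shift.  */
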
static ExitStatus op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
        len = 8;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}

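/* Editor's worked example: ICM with m3 = 0xa (mask bits for bytes 0 and 2
   of the word) takes the default path above: two successive byte loads are
   deposited at bit positions base+24 and base+8, and ccm ends up
   0xff00ff00 shifted by "base", so the CC helper sees exactly the inserted
   bits.  */
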
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}

static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}

#ifndef CONFIG_USER_ONLY
static ExitStatus op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}

static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}

static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif

static ExitStatus op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the operation for the computation of CC.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the operation for the computation of CC.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the operation for the computation of CC.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap.  */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap.  */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap.  */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap.  */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap.  */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}

#ifndef CONFIG_USER_ONLY
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return EXIT_PC_STALE_NOCHAIN;
}

static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return EXIT_PC_STALE_NOCHAIN;
}

static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);

    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return NO_EXIT;
}
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free_i64(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    return NO_EXIT;
}
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free_i64(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    return NO_EXIT;
}
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free_i64(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free_i64(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free_i64(t1);

    return NO_EXIT;
}
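/* All three LOAD MULTIPLE variants above follow the same pattern: touch
   the first and the last word before any guest register is modified, so
   that a page fault on either end leaves the register state intact for
   restart; the remaining accesses lie between the two probed addresses
   and thus cannot fault.  */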
static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    TCGMemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step. */
    if (parallel_cpus) {
        potential_page_fault(s);
        gen_exception(EXCP_ATOMIC);
        return EXIT_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
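/* A sketch of why cc 0 is unconditionally correct here: with a single
   serial CPU the two loads are trivially interlocked, which is what cc 0
   advertises; in a parallel context we never get this far and instead
   retranslate the pair under the exclusive (stop-the-world) slow path.  */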
static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
{
    gen_helper_lpq(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

#ifndef CONFIG_USER_ONLY
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
static ExitStatus op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return NO_EXIT;
}

static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;

    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
3052 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
3054 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3055 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3056 tcg_temp_free_i32(l);
3060 static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
3062 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3063 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3064 tcg_temp_free_i32(l);
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}

#ifndef CONFIG_USER_ONLY
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
3159 static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
3161 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3162 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3163 tcg_temp_free_i32(l);
3167 static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
3169 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3170 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3171 tcg_temp_free_i32(l);
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
3190 static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
3192 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3193 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3194 tcg_temp_free_i32(l);
3198 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
3200 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3204 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
3206 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3210 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
3212 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3216 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
3218 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3222 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
3224 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3228 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
3230 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3231 return_low128(o->out2);
3235 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
3237 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3238 return_low128(o->out2);
3242 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
3244 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3245 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3246 tcg_temp_free_i64(r3);
3250 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
3252 int r3 = get_field(s->fields, r3);
3253 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3257 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
3259 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3260 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3261 tcg_temp_free_i64(r3);
3265 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
3267 int r3 = get_field(s->fields, r3);
3268 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}
3284 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
3286 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3290 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
3292 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3296 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
3298 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3299 tcg_gen_mov_i64(o->out2, o->in2);
3303 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
3305 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3306 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3307 tcg_temp_free_i32(l);
3312 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3314 tcg_gen_neg_i64(o->out, o->in2);
3318 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3320 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3324 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3326 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3330 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3332 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3333 tcg_gen_mov_i64(o->out2, o->in2);
3337 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3339 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3340 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3341 tcg_temp_free_i32(l);
3346 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3348 tcg_gen_or_i64(o->out, o->in1, o->in2);
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

static ExitStatus op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

static ExitStatus op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes. */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

static ExitStatus op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and must not exceed 64 bytes. */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}

#ifndef CONFIG_USER_ONLY
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
#endif
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1. In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields->op2 == 0x5d) {
        pos += 32;
    }

    /* In some cases we can implement this with extract. */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return NO_EXIT;
    }

    /* In some cases we can implement this with deposit. */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
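/* Worked example for the selection logic above, assuming RISBG (op2 0x55)
   with I3 = 40, I4 = 47, I5 = 16 and the zero flag set:
     mask = (~0ull >> 40) ^ (~0ull >> 47 >> 1) = 0x0000000000ff0000
   so architected bits 40-47 come from R2, imask = 0, len = 8,
   pos = 63 - 47 = 16 and rot = 16.  The deposit path then reduces the
   rotation to (rot - pos) & 63 = 0 and emits one deposit_z that places
   the low byte of R2 at bits 16-23 of a zeroed result.  */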
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate. */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
3569 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3571 tcg_gen_bswap16_i64(o->out, o->in2);
3575 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3577 tcg_gen_bswap32_i64(o->out, o->in2);
3581 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3583 tcg_gen_bswap64_i64(o->out, o->in2);
3587 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3589 TCGv_i32 t1 = tcg_temp_new_i32();
3590 TCGv_i32 t2 = tcg_temp_new_i32();
3591 TCGv_i32 to = tcg_temp_new_i32();
3592 tcg_gen_extrl_i64_i32(t1, o->in1);
3593 tcg_gen_extrl_i64_i32(t2, o->in2);
3594 tcg_gen_rotl_i32(to, t1, t2);
3595 tcg_gen_extu_i32_i64(o->out, to);
3596 tcg_temp_free_i32(t1);
3597 tcg_temp_free_i32(t2);
3598 tcg_temp_free_i32(to);
3602 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3604 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
#ifndef CONFIG_USER_ONLY
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
#endif
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end. */
    if (s->pc & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    s->next_pc &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode. */
    return EXIT_PC_STALE;
}
3661 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3663 int r1 = get_field(s->fields, r1);
3664 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3668 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3670 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3674 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3676 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3680 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3682 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3683 return_low128(o->out2);
3687 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3689 gen_helper_sqeb(o->out, cpu_env, o->in2);
3693 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3695 gen_helper_sqdb(o->out, cpu_env, o->in2);
3699 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3701 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3702 return_low128(o->out2);
#ifndef CONFIG_USER_ONLY
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not. */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
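/* Example of the sign handling above for the 32-bit form (SLA, data 31):
   shifting 0x80000001 left by one yields 0x00000002 in the low word, the
   preserved sign bit is OR'd back in, and the stored result is 0x80000002;
   the overflow itself is only visible through the CC_OP_SLA_32 computation
   set up before the shift.  */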
3787 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3789 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3793 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3795 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3799 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3801 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3805 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3807 gen_helper_sfpc(cpu_env, o->in2);
3811 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3813 gen_helper_sfas(cpu_env, o->in2);
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC. */
    if (b2 == 0) {
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status. */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
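/* The deposit above treats the FPC as a 32-bit word: SRNM and SRNMB write
   the BFP rounding mode into the low bits (len 2 resp. 3 at pos 0), while
   SRNMT writes the DFP rounding mode at pos 4.  The effective mode is
   (b2 ? regs[b2] + d2 : d2) & mask, exactly as computed into t1.  */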
#ifndef CONFIG_USER_ONLY
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return NO_EXIT;
}

static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    return EXIT_PC_STALE_NOCHAIN;
}

static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
#endif
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
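/* Resulting STCKE operand layout, as a sketch: byte 0 is the (zero) epoch
   index, bytes 1-13 hold the 104-bit clock (the 64-bit TOD followed by
   40 zero bits whose last bit is forced to 1 by the 0x10000 above), and
   bytes 14-15 are the zeroed programmable field.  */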
3921 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3923 check_privileged(s);
3924 gen_helper_sckc(cpu_env, o->in2);
3928 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3930 check_privileged(s);
3931 gen_helper_stckc(o->out, cpu_env);
3935 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3937 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3938 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3939 check_privileged(s);
3940 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3941 tcg_temp_free_i32(r1);
3942 tcg_temp_free_i32(r3);
3946 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3948 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3949 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3950 check_privileged(s);
3951 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3952 tcg_temp_free_i32(r1);
3953 tcg_temp_free_i32(r3);
3957 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3959 check_privileged(s);
3960 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
3961 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
3965 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3967 check_privileged(s);
3968 gen_helper_spt(cpu_env, o->in2);
3972 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3974 check_privileged(s);
3975 gen_helper_stfl(cpu_env);
3979 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3981 check_privileged(s);
3982 gen_helper_stpt(o->out, cpu_env);
3986 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3988 check_privileged(s);
3989 potential_page_fault(s);
3990 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3995 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3997 check_privileged(s);
3998 gen_helper_spx(cpu_env, o->in2);
4002 static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
4004 check_privileged(s);
4005 potential_page_fault(s);
4006 gen_helper_xsch(cpu_env, regs[1]);
4011 static ExitStatus op_csch(DisasContext *s, DisasOps *o)
4013 check_privileged(s);
4014 potential_page_fault(s);
4015 gen_helper_csch(cpu_env, regs[1]);
4020 static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
4022 check_privileged(s);
4023 potential_page_fault(s);
4024 gen_helper_hsch(cpu_env, regs[1]);
4029 static ExitStatus op_msch(DisasContext *s, DisasOps *o)
4031 check_privileged(s);
4032 potential_page_fault(s);
4033 gen_helper_msch(cpu_env, regs[1], o->in2);
4038 static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
4040 check_privileged(s);
4041 potential_page_fault(s);
4042 gen_helper_rchp(cpu_env, regs[1]);
4047 static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
4049 check_privileged(s);
4050 potential_page_fault(s);
4051 gen_helper_rsch(cpu_env, regs[1]);
4056 static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
4058 check_privileged(s);
4059 potential_page_fault(s);
4060 gen_helper_ssch(cpu_env, regs[1], o->in2);
4065 static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
4067 check_privileged(s);
4068 potential_page_fault(s);
4069 gen_helper_stsch(cpu_env, regs[1], o->in2);
4074 static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
4076 check_privileged(s);
4077 potential_page_fault(s);
4078 gen_helper_tsch(cpu_env, regs[1], o->in2);
4083 static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
4085 check_privileged(s);
4086 potential_page_fault(s);
4087 gen_helper_chsc(cpu_env, o->in2);
4092 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
4094 check_privileged(s);
4095 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4096 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place. */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    return EXIT_PC_STALE_NOCHAIN;
}
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}

static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
#endif
4143 static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
4145 potential_page_fault(s);
4146 gen_helper_stfle(cc_op, cpu_env, o->in2);
4151 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
4153 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4157 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
4159 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4163 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
4165 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4169 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
4171 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4175 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
4177 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4178 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4179 gen_helper_stam(cpu_env, r1, o->in2, r3);
4180 tcg_temp_free_i32(r1);
4181 tcg_temp_free_i32(r3);
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores. */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
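/* Example of the default path above: STCM with m3 = 0x5 (binary 0101)
   stores byte 1 and then byte 3 of the selected 32-bit register field to
   two consecutive addresses, decrementing pos by 8 per mask bit and only
   advancing the address for bits that are set.  */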
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return NO_EXIT;
}

static ExitStatus op_stpq(DisasContext *s, DisasOps *o)
{
    gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    return NO_EXIT;
}
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
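/* In other words: the architected borrow corresponds to CC 0 or CC 1 of
   the previous logical subtraction, so disas_jcc with mask 8 | 4 yields
   the borrow as a 0/1 value that is simply subtracted from the result.  */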
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
static ExitStatus op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return NO_EXIT;
}
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY

static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

#endif
static ExitStatus op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s->fields, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

static ExitStatus op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes. */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and must not exceed 64 bytes. */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero. */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper. */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
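/* Example of the fast path above: "XC 0(8,r1),0(r1)" names the same eight
   bytes for both operands, so it degenerates into a single 64-bit store
   of zero plus cc 0, with no read of the old contents at all.  */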
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
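/* As in op_ori, s->insn->data packs (size << 8) | shift; XIHF, for
   instance, uses size 32 and shift 32, so the mask above selects the high
   word and the CC is derived from those bits alone.  */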
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
4632 /* ====================================================================== */
4633 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4634 the original inputs), update the various cc data structures in order to
4635 be able to compute the new condition code. */
4637 static void cout_abs32(DisasContext *s, DisasOps *o)
4639 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4642 static void cout_abs64(DisasContext *s, DisasOps *o)
4644 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4647 static void cout_adds32(DisasContext *s, DisasOps *o)
4649 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4652 static void cout_adds64(DisasContext *s, DisasOps *o)
4654 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4657 static void cout_addu32(DisasContext *s, DisasOps *o)
4659 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4662 static void cout_addu64(DisasContext *s, DisasOps *o)
4664 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4667 static void cout_addc32(DisasContext *s, DisasOps *o)
4669 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4672 static void cout_addc64(DisasContext *s, DisasOps *o)
4674 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4677 static void cout_cmps32(DisasContext *s, DisasOps *o)
4679 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4682 static void cout_cmps64(DisasContext *s, DisasOps *o)
4684 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4687 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4689 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4692 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4694 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4697 static void cout_f32(DisasContext *s, DisasOps *o)
4699 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4702 static void cout_f64(DisasContext *s, DisasOps *o)
4704 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4707 static void cout_f128(DisasContext *s, DisasOps *o)
4709 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4712 static void cout_nabs32(DisasContext *s, DisasOps *o)
4714 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4717 static void cout_nabs64(DisasContext *s, DisasOps *o)
4719 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4722 static void cout_neg32(DisasContext *s, DisasOps *o)
4724 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4727 static void cout_neg64(DisasContext *s, DisasOps *o)
4729 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4732 static void cout_nz32(DisasContext *s, DisasOps *o)
4734 tcg_gen_ext32u_i64(cc_dst, o->out);
4735 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4738 static void cout_nz64(DisasContext *s, DisasOps *o)
4740 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4743 static void cout_s32(DisasContext *s, DisasOps *o)
4745 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
4748 static void cout_s64(DisasContext *s, DisasOps *o)
4750 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
4753 static void cout_subs32(DisasContext *s, DisasOps *o)
4755 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
4758 static void cout_subs64(DisasContext *s, DisasOps *o)
4760 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
4763 static void cout_subu32(DisasContext *s, DisasOps *o)
4765 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
4768 static void cout_subu64(DisasContext *s, DisasOps *o)
4770 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
4773 static void cout_subb32(DisasContext *s, DisasOps *o)
4775 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
4778 static void cout_subb64(DisasContext *s, DisasOps *o)
4780 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4783 static void cout_tm32(DisasContext *s, DisasOps *o)
4785 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4788 static void cout_tm64(DisasContext *s, DisasOps *o)
4790 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4793 /* ====================================================================== */
4794 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4795 with the TCG register to which we will write. Used in combination with
4796 the "wout" generators, in some cases we need a new temporary, and in
4797 some cases we can write to a TCG global. */
4799 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4801 o->out = tcg_temp_new_i64();
4803 #define SPEC_prep_new 0
4805 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4807 o->out = tcg_temp_new_i64();
4808 o->out2 = tcg_temp_new_i64();
4810 #define SPEC_prep_new_P 0
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
4844 /* ====================================================================== */
4845 /* The "Write OUTput" generators. These generally perform some non-trivial
4846 copy of data to TCG globals, or to main memory. The trivial cases are
4847 generally handled by having a "prep" generator install the TCG global
4848 as the destination of the operation. */
4850 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4852 store_reg(get_field(f, r1), o->out);
4854 #define SPEC_wout_r1 0
4856 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4858 int r1 = get_field(f, r1);
4859 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
4861 #define SPEC_wout_r1_8 0
4863 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4865 int r1 = get_field(f, r1);
4866 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
4868 #define SPEC_wout_r1_16 0
4870 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4872 store_reg32_i64(get_field(f, r1), o->out);
4874 #define SPEC_wout_r1_32 0
4876 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
4878 store_reg32h_i64(get_field(f, r1), o->out);
4880 #define SPEC_wout_r1_32h 0
4882 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4884 int r1 = get_field(f, r1);
4885 store_reg32_i64(r1, o->out);
4886 store_reg32_i64(r1 + 1, o->out2);
4888 #define SPEC_wout_r1_P32 SPEC_r1_even
4890 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4892 int r1 = get_field(f, r1);
4893 store_reg32_i64(r1 + 1, o->out);
4894 tcg_gen_shri_i64(o->out, o->out, 32);
4895 store_reg32_i64(r1, o->out);
4897 #define SPEC_wout_r1_D32 SPEC_r1_even
4899 static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4901 int r3 = get_field(f, r3);
4902 store_reg32_i64(r3, o->out);
4903 store_reg32_i64(r3 + 1, o->out2);
4905 #define SPEC_wout_r3_P32 SPEC_r3_even
4907 static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
4909 int r3 = get_field(f, r3);
4910 store_reg(r3, o->out);
4911 store_reg(r3 + 1, o->out2);
4913 #define SPEC_wout_r3_P64 SPEC_r3_even
4915 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4917 store_freg32_i64(get_field(f, r1), o->out);
4919 #define SPEC_wout_e1 0
4921 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4923 store_freg(get_field(f, r1), o->out);
4925 #define SPEC_wout_f1 0
4927 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4929 int f1 = get_field(s->fields, r1);
4930 store_freg(f1, o->out);
4931 store_freg(f1 + 2, o->out2);
4933 #define SPEC_wout_x1 SPEC_r1_f128
4935 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4937 if (get_field(f, r1) != get_field(f, r2)) {
4938 store_reg32_i64(get_field(f, r1), o->out);
4941 #define SPEC_wout_cond_r1r2_32 0
4943 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4945 if (get_field(f, r1) != get_field(f, r2)) {
4946 store_freg32_i64(get_field(f, r1), o->out);
4949 #define SPEC_wout_cond_e1e2 0
4951 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4953 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4955 #define SPEC_wout_m1_8 0
4957 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4959 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4961 #define SPEC_wout_m1_16 0
4963 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4965 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4967 #define SPEC_wout_m1_32 0
4969 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4971 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4973 #define SPEC_wout_m1_64 0
4975 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4977 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4979 #define SPEC_wout_m2_32 0
4981 static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4983 store_reg(get_field(f, r1), o->in2);
4985 #define SPEC_wout_in2_r1 0
4987 static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4989 store_reg32_i64(get_field(f, r1), o->in2);
4991 #define SPEC_wout_in2_r1_32 0
4993 /* ====================================================================== */
4994 /* The "INput 1" generators. These load the first operand to an insn. */
4996 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4998 o->in1 = load_reg(get_field(f, r1));
5000 #define SPEC_in1_r1 0
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0
5009 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5011 o->in1 = tcg_temp_new_i64();
5012 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
5014 #define SPEC_in1_r1_32s 0
5016 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5018 o->in1 = tcg_temp_new_i64();
5019 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
5021 #define SPEC_in1_r1_32u 0
5023 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5025 o->in1 = tcg_temp_new_i64();
5026 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
5028 #define SPEC_in1_r1_sr32 0
5030 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
5032 o->in1 = load_reg(get_field(f, r1) + 1);
5034 #define SPEC_in1_r1p1 SPEC_r1_even
5036 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5038 o->in1 = tcg_temp_new_i64();
5039 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
5041 #define SPEC_in1_r1p1_32s SPEC_r1_even
5043 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5045 o->in1 = tcg_temp_new_i64();
5046 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
5048 #define SPEC_in1_r1p1_32u SPEC_r1_even
5050 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5052 int r1 = get_field(f, r1);
5053 o->in1 = tcg_temp_new_i64();
5054 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5056 #define SPEC_in1_r1_D32 SPEC_r1_even
5058 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5060 o->in1 = load_reg(get_field(f, r2));
5062 #define SPEC_in1_r2 0
5064 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5066 o->in1 = tcg_temp_new_i64();
5067 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
5069 #define SPEC_in1_r2_sr32 0
5071 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5073 o->in1 = load_reg(get_field(f, r3));
5075 #define SPEC_in1_r3 0
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0
5084 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5086 o->in1 = tcg_temp_new_i64();
5087 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
5089 #define SPEC_in1_r3_32s 0
5091 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5093 o->in1 = tcg_temp_new_i64();
5094 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
5096 #define SPEC_in1_r3_32u 0
5098 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5100 int r3 = get_field(f, r3);
5101 o->in1 = tcg_temp_new_i64();
5102 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5104 #define SPEC_in1_r3_D32 SPEC_r3_even
5106 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5108 o->in1 = load_freg32_i64(get_field(f, r1));
5110 #define SPEC_in1_e1 0
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0
5135 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
5137 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
5139 #define SPEC_in1_la1 0
5141 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
5143 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5144 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5146 #define SPEC_in1_la2 0
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
5196 /* ====================================================================== */
5197 /* The "INput 2" generators. These load the second operand to an insn. */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0
5206 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5208 o->in2 = tcg_temp_new_i64();
5209 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
5211 #define SPEC_in2_r1_16u 0
5213 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5215 o->in2 = tcg_temp_new_i64();
5216 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
5218 #define SPEC_in2_r1_32u 0
5220 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5222 int r1 = get_field(f, r1);
5223 o->in2 = tcg_temp_new_i64();
5224 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5226 #define SPEC_in2_r1_D32 SPEC_r1_even
5228 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5230 o->in2 = load_reg(get_field(f, r2));
5232 #define SPEC_in2_r2 0
5234 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5236 o->in2 = regs[get_field(f, r2)];
5239 #define SPEC_in2_r2_o 0
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

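/* When r2 is 0, no value is loaded and o->in2 stays unset: consumers of
   this loader follow the architectural convention that register 0 means
   "no operand" rather than the contents of GPR 0.  */
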
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

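/* The *_sr32 loaders fetch the high word of a GPR (bits 0-31); these
   back, among others, the high-word-facility style insns that operate
   on the upper register halves.  */
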
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

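/* Relative-immediate operands are encoded in halfwords, hence the
   scaling by 2; the result is an absolute address computed from the
   insn's own PC, e.g. an i2 of -3 yields s->pc - 6.  */
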
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

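/* Unlike the m1 loaders, the m2/mri2 loaders compute the address into
   o->in2 and then overwrite it in place with the loaded value, so no
   separate address temporary is needed or freed.  */
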
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

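/* The shift amount comes from the insn table's D column (s->insn->data),
   so one loader serves a whole family of immediate insns that target a
   particular halfword or word of a register -- typically 0, 16, 32 or 48
   for the 16-bit variants and 0 or 32 for the 32-bit ones.  */
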
#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif

/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

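/* For illustration (entry quoted from memory; treat as hypothetical if
   it differs): an insn-data.def line such as
       C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
   expands under the first D definition below to the enumerator insn_AR,
   and under the table definition to a DisasInsn wired to in1_r1,
   in2_r2, prep_new, wout_r1_32, op_add and cout_adds32.  */
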
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                      \
    .opc = OPC,                                                            \
    .fmt = FMT_##FT,                                                       \
    .fac = FAC_##FC,                                                       \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
    .name = #NM,                                                           \
    .help_in1 = in1_##I1,                                                  \
    .help_in2 = in2_##I2,                                                  \
    .help_prep = prep_##P,                                                 \
    .help_wout = wout_##W,                                                 \
    .help_cout = cout_##CC,                                                \
    .help_op = op_##OP,                                                    \
    .data = D                                                              \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define op_0  NULL
#define cout_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

/* Give smaller names to the various facilities. */
#define FAC_Z S390_FEAT_ZARCH
#define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP S390_FEAT_DFP
#define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE S390_FEAT_EXECUTE_EXT
#define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
#define FAC_HW S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE S390_FEAT_STFLE
#define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH S390_FEAT_DAT_ENH
#define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};
#undef D

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}
#undef D
#undef C

/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

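/* Worked example (field layout assumed for illustration): an RR-format
   r1 field has beg = 8 and size = 4, so for the left-aligned insn
   0x1a12000000000000 the extraction (insn << 8) >> 60 yields 0x1.  */
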
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}

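/* Note on case 2 above: long-displacement formats carry a 20-bit signed
   displacement as a 12-bit DL part plus an 8-bit DH part.  The extracted
   field arrives with DH in its low byte, so the shift/or reassembles
   DH:DL while the (int8_t) cast sign-extends from bit 19.  */
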
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->next_pc = s->pc + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}

static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->pc);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers.  */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

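    /* The g_* flags mark operands that alias the global TCG values for
       the architectural registers (regs[]/fregs[]); those must survive
       the insn, so only non-global temporaries are freed above.  */
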
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Save off cc.  */
        update_cc_op(s);

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}

void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    int num_insns, max_insns;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    dc.ex_value = tb->cs_base;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    do {
        tcg_gen_insn_start(dc.pc, dc.cc_op);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
            status = EXIT_PC_STALE;
            do_debug = true;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc.pc += 2;
            break;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        status = translate_one(env, &dc);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || cs->singlestep_enabled
                || dc.ex_value)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
    case EXIT_PC_STALE_NOCHAIN:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* FALLTHRU */
    case EXIT_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(&dc) || status == EXIT_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(0);
        } else {
            tcg_gen_lookup_and_goto_ptr(psw_addr);
        }
        break;
    default:
        g_assert_not_reached();
    }

    gen_tb_end(tb, num_insns);

    tb->size = dc.pc - pc_start;
    tb->icount = num_insns;

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        if (unlikely(dc.ex_value)) {
            /* ??? Unfortunately log_target_disas can't use host memory.  */
            qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
        } else {
            qemu_log("IN: %s\n", lookup_symbol(pc_start));
            log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
            qemu_log("\n");
        }
        qemu_log_unlock();
    }
#endif
}

void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];
    env->psw.addr = data[0];
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}