4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
38 /* global register indexes */
39 static TCGv_ptr cpu_env;
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
/* Fragment of struct DisasContext (declaration head elided in this excerpt). */
54 struct TranslationBlock *tb;
55 const DisasInsn *insn;
59 bool singlestep_enabled;
62 /* Information carried about a condition to be evaluated. */
/* Operand pair for the comparison, in 64-bit or 32-bit flavor
   (union wrapper elided in this excerpt). */
69 struct { TCGv_i64 a, b; } s64;
70 struct { TCGv_i32 a, b; } s32;
/* Per-cc_op hit/miss counters for inline branch statistics,
   compiled in only with DEBUG_INLINE_BRANCHES. */
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit[CC_OP_MAX];
78 static uint64_t inline_branch_miss[CC_OP_MAX];
/* Compute the link-information value a branch-and-save style insn
   stores for return address PC.  Only the non-64-bit paths are
   visible in this excerpt; the 64-bit/24-bit cases are elided. */
81 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
83 if (!(s->tb->flags & FLAG_MASK_64)) {
84 if (s->tb->flags & FLAG_MASK_32) {
/* 31-bit addressing mode: the link value carries the
   addressing-mode bit on top of the return address. */
85 return pc | 0x80000000;
/* Dump CPU state (PSW, GPRs, FPRs, vector and control registers) to F
   via CPU_FPRINTF, for the monitor/debug "info registers" path.
   Several lines (loop bodies' closing braces, #else branches) are
   elided in this excerpt. */
91 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
94 S390CPU *cpu = S390_CPU(cs);
95 CPUS390XState *env = &cpu->env;
/* PSW line: symbolic cc name in one build flavor ... */
99 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
100 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
/* ... raw cc_op value in the other. */
102 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
103 env->psw.mask, env->psw.addr, env->cc_op);
/* 16 general registers. */
106 for (i = 0; i < 16; i++) {
107 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
109 cpu_fprintf(f, "\n");
/* 16 floating-point registers (64-bit halves of the vregs). */
115 for (i = 0; i < 16; i++) {
116 cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
118 cpu_fprintf(f, "\n");
/* 32 vector registers, two 64-bit lanes each, two per output line. */
124 for (i = 0; i < 32; i++) {
125 cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
126 env->vregs[i][0].ll, env->vregs[i][1].ll);
127 cpu_fprintf(f, (i % 2) ? " " : "\n");
/* Control registers exist only in system emulation. */
130 #ifndef CONFIG_USER_ONLY
131 for (i = 0; i < 16; i++) {
132 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
134 cpu_fprintf(f, "\n");
/* Optional inline-branch statistics table. */
141 #ifdef DEBUG_INLINE_BRANCHES
142 for (i = 0; i < CC_OP_MAX; i++) {
143 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
144 inline_branch_miss[i], inline_branch_hit[i]);
148 cpu_fprintf(f, "\n");
/* TCG global variables mirroring guest CPU state fields. */
151 static TCGv_i64 psw_addr;
152 static TCGv_i64 psw_mask;
/* Condition-code computation state: operation selector plus up to
   three source operands (src, dst, vr). */
154 static TCGv_i32 cc_op;
155 static TCGv_i64 cc_src;
156 static TCGv_i64 cc_dst;
157 static TCGv_i64 cc_vr;
/* Names and TCG handles for the 16 GPRs and 16 FPRs; the name buffer
   holds "r0".."r15" followed by "f0".."f15". */
159 static char cpu_reg_names[32][4];
160 static TCGv_i64 regs[16];
161 static TCGv_i64 fregs[16];
163 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
/* One-time registration of the TCG globals above against their
   offsets inside CPUS390XState.  Some argument lines (the name
   strings) are elided in this excerpt. */
165 void s390x_translate_init(void)
169 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
170 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
171 offsetof(CPUS390XState, psw.addr),
173 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
174 offsetof(CPUS390XState, psw.mask),
177 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
179 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
181 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
183 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
/* General registers r0..r15. */
186 for (i = 0; i < 16; i++) {
187 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
188 regs[i] = tcg_global_mem_new(TCG_AREG0,
189 offsetof(CPUS390XState, regs[i]),
/* FP registers f0..f15, aliased onto the first lane of the vregs. */
193 for (i = 0; i < 16; i++) {
194 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
195 fregs[i] = tcg_global_mem_new(TCG_AREG0,
196 offsetof(CPUS390XState, vregs[i][0].d),
197 cpu_reg_names[i + 16]);
/* Return a fresh temp holding GPR REG (caller frees).  The return
   statement is elided in this excerpt. */
201 static TCGv_i64 load_reg(int reg)
203 TCGv_i64 r = tcg_temp_new_i64();
204 tcg_gen_mov_i64(r, regs[reg]);
/* Return a fresh temp with the 32-bit float from FPR REG (stored in
   the high half of the 64-bit register) shifted into the low half. */
208 static TCGv_i64 load_freg32_i64(int reg)
210 TCGv_i64 r = tcg_temp_new_i64();
211 tcg_gen_shri_i64(r, fregs[reg], 32);
/* Full 64-bit GPR write. */
215 static void store_reg(int reg, TCGv_i64 v)
217 tcg_gen_mov_i64(regs[reg], v);
/* Full 64-bit FPR write. */
220 static void store_freg(int reg, TCGv_i64 v)
222 tcg_gen_mov_i64(fregs[reg], v);
225 static void store_reg32_i64(int reg, TCGv_i64 v)
227 /* 32 bit register writes keep the upper half */
228 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
/* Write only the high 32 bits of a GPR (high-word facility insns). */
231 static void store_reg32h_i64(int reg, TCGv_i64 v)
233 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
/* Write a 32-bit float into the high half of an FPR. */
236 static void store_freg32_i64(int reg, TCGv_i64 v)
238 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
/* Fetch the low 64 bits of a helper's 128-bit result from env->retxl. */
241 static void return_low128(TCGv_i64 dest)
243 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
/* Flush the current translation pc into the PSW address global. */
246 static void update_psw_addr(DisasContext *s)
249 tcg_gen_movi_i64(psw_addr, s->pc);
/* Materialize s->cc_op into the cc_op global when it is a known
   constant (dynamic/static values already live in env). */
252 static void update_cc_op(DisasContext *s)
254 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
255 tcg_gen_movi_i32(cc_op, s->cc_op);
/* Body elided in this excerpt. */
259 static void potential_page_fault(DisasContext *s)
/* Fetch 2/4 instruction bytes at PC, zero-extended to 64 bits. */
265 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
267 return (uint64_t)cpu_lduw_code(env, pc);
270 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
272 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
/* Map the PSW address-space-control bits (as cached in tb->flags) to
   a softmmu memory index.  Return statements for each case are elided
   in this excerpt. */
275 static int get_mem_index(DisasContext *s)
277 switch (s->tb->flags & FLAG_MASK_ASC) {
278 case PSW_ASC_PRIMARY >> 32:
280 case PSW_ASC_SECONDARY >> 32:
282 case PSW_ASC_HOME >> 32:
/* Raise generic exception EXCP via helper (does not return to TB). */
290 static void gen_exception(int excp)
292 TCGv_i32 tmp = tcg_const_i32(excp);
293 gen_helper_exception(cpu_env, tmp);
294 tcg_temp_free_i32(tmp);
/* Raise a program exception with the given PGM code: record the code
   and instruction length in env, then trigger EXCP_PGM. */
297 static void gen_program_exception(DisasContext *s, int code)
301 /* Remember what pgm exeption this was. */
302 tmp = tcg_const_i32(code);
303 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
304 tcg_temp_free_i32(tmp);
/* Instruction length = distance from insn start to next pc. */
306 tmp = tcg_const_i32(s->next_pc - s->pc);
307 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
308 tcg_temp_free_i32(tmp);
310 /* Advance past instruction. */
317 /* Trigger exception. */
318 gen_exception(EXCP_PGM);
/* Convenience wrapper: operation (illegal opcode) program exception. */
321 static inline void gen_illegal_opcode(DisasContext *s)
323 gen_program_exception(s, PGM_OPERATION);
/* Raise a data program exception with DXC 0xff recorded in the FPC. */
326 static inline void gen_trap(DisasContext *s)
330 /* Set DXC to 0xff. */
331 t = tcg_temp_new_i32();
332 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
333 tcg_gen_ori_i32(t, t, 0xff00);
334 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
335 tcg_temp_free_i32(t);
337 gen_program_exception(s, PGM_DATA);
340 #ifndef CONFIG_USER_ONLY
/* Raise a privileged-operation exception when translating in
   problem state (PSTATE bit set in tb->flags). */
341 static void check_privileged(DisasContext *s)
343 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
344 gen_program_exception(s, PGM_PRIVILEGED);
/* Compute an effective address base(b2) + index(x2) + disp(d2) into a
   fresh temp; register number 0 means "no register" for b2/x2.  Some
   branches of the b2/x2 case analysis are elided in this excerpt. */
349 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
351 TCGv_i64 tmp = tcg_temp_new_i64();
352 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
354 /* Note that d2 is limited to 20 bits, signed. If we crop negative
355 displacements early we create larger immedate addends. */
357 /* Note that addi optimizes the imm==0 case. */
359 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
360 tcg_gen_addi_i64(tmp, tmp, d2);
362 tcg_gen_addi_i64(tmp, regs[b2], d2);
364 tcg_gen_addi_i64(tmp, regs[x2], d2);
370 tcg_gen_movi_i64(tmp, d2);
/* In 24/31-bit mode the address wraps at 31 bits. */
373 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
/* True when cc_src/cc_dst/cc_vr currently hold data that a later
   cc computation would read (condition tail elided in this excerpt). */
379 static inline bool live_cc_data(DisasContext *s)
381 return (s->cc_op != CC_OP_DYNAMIC
382 && s->cc_op != CC_OP_STATIC
/* Set the cc to the compile-time constant VAL; dead cc inputs are
   discarded so TCG can optimize them away. */
386 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
388 if (live_cc_data(s)) {
389 tcg_gen_discard_i64(cc_src);
390 tcg_gen_discard_i64(cc_dst);
391 tcg_gen_discard_i64(cc_vr);
393 s->cc_op = CC_OP_CONST0 + val;
/* Record a one-operand cc computation: OP over DST. */
396 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
398 if (live_cc_data(s)) {
399 tcg_gen_discard_i64(cc_src);
400 tcg_gen_discard_i64(cc_vr);
402 tcg_gen_mov_i64(cc_dst, dst);
/* Record a two-operand cc computation: OP over SRC, DST. */
406 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
409 if (live_cc_data(s)) {
410 tcg_gen_discard_i64(cc_vr);
412 tcg_gen_mov_i64(cc_src, src);
413 tcg_gen_mov_i64(cc_dst, dst);
/* Record a three-operand cc computation: OP over SRC, DST, VR. */
417 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
418 TCGv_i64 dst, TCGv_i64 vr)
420 tcg_gen_mov_i64(cc_src, src);
421 tcg_gen_mov_i64(cc_dst, dst);
422 tcg_gen_mov_i64(cc_vr, vr);
/* Convenience wrappers for the common nonzero-style cc setters. */
426 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
428 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
431 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
433 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
436 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
438 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
441 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
443 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
446 /* CC value is in env->cc_op */
447 static void set_cc_static(DisasContext *s)
449 if (live_cc_data(s)) {
450 tcg_gen_discard_i64(cc_src);
451 tcg_gen_discard_i64(cc_dst);
452 tcg_gen_discard_i64(cc_vr);
454 s->cc_op = CC_OP_STATIC;
457 /* calculates cc into cc_op */
/* Materialize the tracked cc computation into the cc_op global.
   Heavily elided in this excerpt: the switch over s->cc_op and many
   case labels are missing; only representative helper calls remain. */
458 static void gen_op_calc_cc(DisasContext *s)
460 TCGv_i32 local_cc_op;
463 TCGV_UNUSED_I32(local_cc_op);
464 TCGV_UNUSED_I64(dummy);
/* Zero placeholder for unused helper arguments. */
467 dummy = tcg_const_i64(0);
481 local_cc_op = tcg_const_i32(s->cc_op);
497 /* s->cc_op is the cc value */
498 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
501 /* env->cc_op already is the cc value */
/* One-operand cc ops: only cc_dst is meaningful. */
516 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
521 case CC_OP_LTUGTU_32:
522 case CC_OP_LTUGTU_64:
/* Two-operand cc ops: cc_src and cc_dst. */
529 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
/* Three-operand cc ops: cc_src, cc_dst and cc_vr. */
544 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
547 /* unknown operation - assume 3 arguments and cc_op in env */
548 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
554 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
555 tcg_temp_free_i32(local_cc_op);
557 if (!TCGV_IS_UNUSED_I64(dummy)) {
558 tcg_temp_free_i64(dummy);
561 /* We now have cc in cc_op as constant */
/* Whether a direct goto_tb to DEST is permissible: the destination
   must lie on one of the pages the TB already touches, and neither
   single-stepping nor a pending I/O instruction may be active. */
565 static int use_goto_tb(DisasContext *s, uint64_t dest)
567 /* NOTE: we handle the case where the TB spans two pages here */
568 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
569 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
570 && !s->singlestep_enabled
571 && !(s->tb->cflags & CF_LAST_IO));
/* Statistics hooks, no-ops unless DEBUG_INLINE_BRANCHES is defined. */
574 static void account_noninline_branch(DisasContext *s, int cc_op)
576 #ifdef DEBUG_INLINE_BRANCHES
577 inline_branch_miss[cc_op]++;
581 static void account_inline_branch(DisasContext *s, int cc_op)
583 #ifdef DEBUG_INLINE_BRANCHES
584 inline_branch_hit[cc_op]++;
588 /* Table of mask values to comparison codes, given a comparison as input.
589 For such, CC=3 should not be possible. */
/* Indexed by the 4-bit branch mask; pairs because bit 0 (the CC=3
   position) is a don't-care for comparison results. */
590 static const TCGCond ltgt_cond[16] = {
591 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
592 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
593 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
594 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
595 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
596 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
597 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
598 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
601 /* Table of mask values to comparison codes, given a logic op as input.
602 For such, only CC=0 and CC=1 should be possible. */
603 static const TCGCond nz_cond[16] = {
604 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
605 TCG_COND_NEVER, TCG_COND_NEVER,
606 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
607 TCG_COND_NE, TCG_COND_NE,
608 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
609 TCG_COND_EQ, TCG_COND_EQ,
610 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
611 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
614 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
615 details required to generate a TCG comparison. */
/* Heavily elided in this excerpt: the switch heads, case labels for
   several cc_op groups, and many break/assignment lines are missing.
   Structure: (1) trivial always/never masks; (2) try to fold the
   mask+cc_op into a single inline TCG comparison; (3) otherwise
   compute cc into cc_op and compare against the mask bits. */
616 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
619 enum cc_op old_cc_op = s->cc_op;
/* Mask of all ones / all zeros needs no data at all. */
621 if (mask == 15 || mask == 0) {
622 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
625 c->g1 = c->g2 = true;
630 /* Find the TCG condition for the mask + cc op. */
/* Signed comparison cc ops map through ltgt_cond. */
636 cond = ltgt_cond[mask];
637 if (cond == TCG_COND_NEVER) {
640 account_inline_branch(s, old_cc_op);
/* Unsigned comparison cc ops use the unsigned twin condition. */
643 case CC_OP_LTUGTU_32:
644 case CC_OP_LTUGTU_64:
645 cond = tcg_unsigned_cond(ltgt_cond[mask]);
646 if (cond == TCG_COND_NEVER) {
649 account_inline_branch(s, old_cc_op);
/* Logic-result cc ops map through nz_cond. */
653 cond = nz_cond[mask];
654 if (cond == TCG_COND_NEVER) {
657 account_inline_branch(s, old_cc_op);
672 account_inline_branch(s, old_cc_op);
687 account_inline_branch(s, old_cc_op);
/* Test-under-mask style: only the "found / not found" bits matter. */
691 switch (mask & 0xa) {
692 case 8: /* src == 0 -> no one bit found */
695 case 2: /* src != 0 -> one bit found */
701 account_inline_branch(s, old_cc_op);
/* Add-with-carry result classes. */
707 case 8 | 2: /* vr == 0 */
710 case 4 | 1: /* vr != 0 */
713 case 8 | 4: /* no carry -> vr >= src */
716 case 2 | 1: /* carry -> vr < src */
722 account_inline_branch(s, old_cc_op);
727 /* Note that CC=0 is impossible; treat it as dont-care. */
729 case 2: /* zero -> op1 == op2 */
732 case 4 | 1: /* !zero -> op1 != op2 */
735 case 4: /* borrow (!carry) -> op1 < op2 */
738 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
744 account_inline_branch(s, old_cc_op);
749 /* Calculate cc value. */
754 /* Jump based on CC. We'll load up the real cond below;
755 the assignment here merely avoids a compiler warning. */
756 account_noninline_branch(s, old_cc_op);
757 old_cc_op = CC_OP_STATIC;
758 cond = TCG_COND_NEVER;
762 /* Load up the arguments of the comparison. */
764 c->g1 = c->g2 = false;
/* 32-bit single-operand compare against zero. */
768 c->u.s32.a = tcg_temp_new_i32();
769 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
770 c->u.s32.b = tcg_const_i32(0);
/* 32-bit two-operand compare: truncate cc_src vs cc_dst. */
773 case CC_OP_LTUGTU_32:
776 c->u.s32.a = tcg_temp_new_i32();
777 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
778 c->u.s32.b = tcg_temp_new_i32();
779 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
786 c->u.s64.b = tcg_const_i64(0);
/* 64-bit two-operand compare uses the cc globals directly. */
790 case CC_OP_LTUGTU_64:
794 c->g1 = c->g2 = true;
/* Test-under-mask: compare (src & dst) against zero. */
800 c->u.s64.a = tcg_temp_new_i64();
801 c->u.s64.b = tcg_const_i64(0);
802 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
/* Add-with-carry: compare result vr against 0 or against src. */
807 c->u.s32.a = tcg_temp_new_i32();
808 c->u.s32.b = tcg_temp_new_i32();
809 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
810 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
811 tcg_gen_movi_i32(c->u.s32.b, 0);
813 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
820 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
821 c->u.s64.b = tcg_const_i64(0);
/* Fallback: cc has been computed into cc_op; pick the cheapest
   comparison that realizes the requested mask. */
833 case 0x8 | 0x4 | 0x2: /* cc != 3 */
835 c->u.s32.b = tcg_const_i32(3);
837 case 0x8 | 0x4 | 0x1: /* cc != 2 */
839 c->u.s32.b = tcg_const_i32(2);
841 case 0x8 | 0x2 | 0x1: /* cc != 1 */
843 c->u.s32.b = tcg_const_i32(1);
845 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
848 c->u.s32.a = tcg_temp_new_i32();
849 c->u.s32.b = tcg_const_i32(0);
850 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
852 case 0x8 | 0x4: /* cc < 2 */
854 c->u.s32.b = tcg_const_i32(2);
856 case 0x8: /* cc == 0 */
858 c->u.s32.b = tcg_const_i32(0);
860 case 0x4 | 0x2 | 0x1: /* cc != 0 */
862 c->u.s32.b = tcg_const_i32(0);
864 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
867 c->u.s32.a = tcg_temp_new_i32();
868 c->u.s32.b = tcg_const_i32(0);
869 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
871 case 0x4: /* cc == 1 */
873 c->u.s32.b = tcg_const_i32(1);
875 case 0x2 | 0x1: /* cc > 1 */
877 c->u.s32.b = tcg_const_i32(1);
879 case 0x2: /* cc == 2 */
881 c->u.s32.b = tcg_const_i32(2);
883 case 0x1: /* cc == 3 */
885 c->u.s32.b = tcg_const_i32(3);
888 /* CC is masked by something else: (8 >> cc) & mask. */
891 c->u.s32.a = tcg_const_i32(8);
892 c->u.s32.b = tcg_const_i32(0);
893 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
894 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
/* Release any non-global temps held in C (guard conditions on g1/g2
   and is_64 are elided in this excerpt). */
905 static void free_compare(DisasCompare *c)
909 tcg_temp_free_i64(c->u.s64.a);
911 tcg_temp_free_i32(c->u.s32.a);
916 tcg_temp_free_i64(c->u.s64.b);
918 tcg_temp_free_i32(c->u.s32.b);
923 /* ====================================================================== */
924 /* Define the insn format enumeration. */
/* First expansion of insn-format.def: each FORMAT line becomes one
   FMT_* enumerator regardless of its field count. */
925 #define F0(N) FMT_##N,
926 #define F1(N, X1) F0(N)
927 #define F2(N, X1, X2) F0(N)
928 #define F3(N, X1, X2, X3) F0(N)
929 #define F4(N, X1, X2, X3, X4) F0(N)
930 #define F5(N, X1, X2, X3, X4, X5) F0(N)
933 #include "insn-format.def"
943 /* Define a structure to hold the decoded fields. We'll store each inside
944 an array indexed by an enum. In order to conserve memory, we'll arrange
945 for fields that do not exist at the same time to overlap, thus the "C"
946 for compact. For checking purposes there is an "O" for original index
947 as well that will be applied to availability bitmaps. */
/* Enumerator bodies elided in this excerpt. */
949 enum DisasFieldIndexO {
972 enum DisasFieldIndexC {
/* Decoded-field storage; the value array is elided in this excerpt. */
1003 struct DisasFields {
1007 unsigned presentC:16;
1008 unsigned int presentO;
1012 /* This is the way fields are to be accessed out of DisasFields. */
1013 #define have_field(S, F) have_field1((S), FLD_O_##F)
1014 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
/* Test/fetch a decoded field; the asserting accessor's return line is
   elided in this excerpt. */
1016 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1018 return (f->presentO >> c) & 1;
1021 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1022 enum DisasFieldIndexC c)
1024 assert(have_field1(f, o));
1028 /* Describe the layout of each field in each format. */
/* beg (elided), size in bits, extraction type, and both indices. */
1029 typedef struct DisasField {
1031 unsigned int size:8;
1032 unsigned int type:2;
1033 unsigned int indexC:6;
1034 enum DisasFieldIndexO indexO:8;
1037 typedef struct DisasFormatInfo {
1038 DisasField op[NUM_C_FIELD];
/* Field-layout constructors used inside insn-format.def: register,
   mask, base+disp (12- and 20-bit), immediate and length fields. */
1041 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1042 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1043 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1044 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1045 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1046 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1047 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1048 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1049 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1050 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1051 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1052 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1053 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1054 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
/* Second expansion of insn-format.def: one DisasFormatInfo per format. */
1056 #define F0(N) { { } },
1057 #define F1(N, X1) { { X1 } },
1058 #define F2(N, X1, X2) { { X1, X2 } },
1059 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1060 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1061 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1063 static const DisasFormatInfo format_info[] = {
1064 #include "insn-format.def"
1082 /* Generally, we'll extract operands into this structures, operate upon
1083 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1084 of routines below for more details. */
/* Fragment of struct DisasOps: g_* flags mark operands that alias TCG
   globals and must not be freed. */
1086 bool g_out, g_out2, g_in1, g_in2;
1087 TCGv_i64 out, out2, in1, in2;
1091 /* Instructions can place constraints on their operands, raising specification
1092 exceptions if they are violated. To make this easy to automate, each "in1",
1093 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1094 of the following, or 0. To make this easy to document, we'll put the
1095 SPEC_<name> defines next to <name>. */
1097 #define SPEC_r1_even 1
1098 #define SPEC_r2_even 2
1099 #define SPEC_r3_even 4
1100 #define SPEC_r1_f128 8
1101 #define SPEC_r2_f128 16
1103 /* Return values from translate_one, indicating the state of the TB. */
/* The enumerator names themselves are elided in this excerpt. */
1105 /* Continue the TB. */
1107 /* We have emitted one or more goto_tb. No fixup required. */
1109 /* We are not using a goto_tb (for whatever reason), but have updated
1110 the PC (for whatever reason), so there's no need to do it again on
1113 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1114 updated the PC for the next instruction to be executed. */
1116 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1117 No following code will be executed. */
/* Facility bits an insn may require before it is accepted. */
1121 typedef enum DisasFacility {
1122 FAC_Z, /* zarch (default) */
1123 FAC_CASS, /* compare and swap and store */
1124 FAC_CASS2, /* compare and swap and store 2*/
1125 FAC_DFP, /* decimal floating point */
1126 FAC_DFPR, /* decimal floating point rounding */
1127 FAC_DO, /* distinct operands */
1128 FAC_EE, /* execute extensions */
1129 FAC_EI, /* extended immediate */
1130 FAC_FPE, /* floating point extension */
1131 FAC_FPSSH, /* floating point support sign handling */
1132 FAC_FPRGR, /* FPR-GR transfer */
1133 FAC_GIE, /* general instructions extension */
1134 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1135 FAC_HW, /* high-word */
1136 FAC_IEEEE_SIM, /* IEEE exception sumilation */
1137 FAC_MIE, /* miscellaneous-instruction-extensions */
1138 FAC_LAT, /* load-and-trap */
1139 FAC_LOC, /* load/store on condition */
1140 FAC_LD, /* long displacement */
1141 FAC_PC, /* population count */
1142 FAC_SCF, /* store clock fast */
1143 FAC_SFLE, /* store facility list extended */
1144 FAC_ILA, /* interlocked access facility 1 */
/* Fragment of struct DisasInsn: per-insn decode table entry with its
   facility requirement and the helper pipeline stages. */
1150 DisasFacility fac:8;
1155 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1156 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1157 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1158 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1159 void (*help_cout)(DisasContext *, DisasOps *);
1160 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1165 /* ====================================================================== */
1166 /* Miscellaneous helpers, used by several operations. */
/* Load the shift-count operand for shift insns into o->in2, masked to
   MASK bits; the b2==0 immediate-vs-register test is elided here. */
1168 static void help_l2_shift(DisasContext *s, DisasFields *f,
1169 DisasOps *o, int mask)
1171 int b2 = get_field(f, b2);
1172 int d2 = get_field(f, d2);
1175 o->in2 = tcg_const_i64(d2 & mask);
1177 o->in2 = get_address(s, 0, b2, d2);
1178 tcg_gen_andi_i64(o->in2, o->in2, mask);
/* Unconditional direct branch to DEST: use goto_tb when chaining is
   allowed, otherwise just update the PSW address and exit. */
1182 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1184 if (dest == s->next_pc) {
1187 if (use_goto_tb(s, dest)) {
1190 tcg_gen_movi_i64(psw_addr, dest);
1191 tcg_gen_exit_tb((uintptr_t)s->tb);
1192 return EXIT_GOTO_TB;
1194 tcg_gen_movi_i64(psw_addr, dest);
1195 return EXIT_PC_UPDATED;
/* Emit a conditional branch described by C.  IS_IMM selects between a
   pc-relative target (s->pc + 2*IMM) and the register target CDEST.
   Several lines (goto_tb calls, labels, returns) are elided in this
   excerpt; the three main code paths are annotated below. */
1199 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1200 bool is_imm, int imm, TCGv_i64 cdest)
1203 uint64_t dest = s->pc + 2 * imm;
1206 /* Take care of the special cases first. */
1207 if (c->cond == TCG_COND_NEVER) {
1212 if (dest == s->next_pc) {
1213 /* Branch to next. */
1217 if (c->cond == TCG_COND_ALWAYS) {
1218 ret = help_goto_direct(s, dest);
/* An unset register target (r0) means no branch at all. */
1222 if (TCGV_IS_UNUSED_I64(cdest)) {
1223 /* E.g. bcr %r0 -> no branch. */
1227 if (c->cond == TCG_COND_ALWAYS) {
1228 tcg_gen_mov_i64(psw_addr, cdest);
1229 ret = EXIT_PC_UPDATED;
/* Path 1: both fallthrough and (immediate) target can chain. */
1234 if (use_goto_tb(s, s->next_pc)) {
1235 if (is_imm && use_goto_tb(s, dest)) {
1236 /* Both exits can use goto_tb. */
1239 lab = gen_new_label();
1241 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1243 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1246 /* Branch not taken. */
1248 tcg_gen_movi_i64(psw_addr, s->next_pc);
1249 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1254 tcg_gen_movi_i64(psw_addr, dest);
1255 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
/* Path 2: only the fallthrough can chain. */
1259 /* Fallthru can use goto_tb, but taken branch cannot. */
1260 /* Store taken branch destination before the brcond. This
1261 avoids having to allocate a new local temp to hold it.
1262 We'll overwrite this in the not taken case anyway. */
1264 tcg_gen_mov_i64(psw_addr, cdest);
1267 lab = gen_new_label();
1269 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1271 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1274 /* Branch not taken. */
1277 tcg_gen_movi_i64(psw_addr, s->next_pc);
1278 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1282 tcg_gen_movi_i64(psw_addr, dest);
1284 ret = EXIT_PC_UPDATED;
/* Path 3: no chaining possible; select the next PSW address with a
   movcond and exit. */
1287 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1288 Most commonly we're single-stepping or some other condition that
1289 disables all use of goto_tb. Just update the PC and exit. */
1291 TCGv_i64 next = tcg_const_i64(s->next_pc);
1293 cdest = tcg_const_i64(dest);
1297 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
/* 32-bit comparison: widen the setcond result and movcond on it. */
1300 TCGv_i32 t0 = tcg_temp_new_i32();
1301 TCGv_i64 t1 = tcg_temp_new_i64();
1302 TCGv_i64 z = tcg_const_i64(0);
1303 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1304 tcg_gen_extu_i32_i64(t1, t0);
1305 tcg_temp_free_i32(t0);
1306 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1307 tcg_temp_free_i64(t1);
1308 tcg_temp_free_i64(z);
1312 tcg_temp_free_i64(cdest);
1314 tcg_temp_free_i64(next);
1316 ret = EXIT_PC_UPDATED;
1324 /* ====================================================================== */
1325 /* The operations. These perform the bulk of the work for any insn,
1326 usually after the operands have been loaded and output initialized. */
/* LPR/LPGR family: absolute value via conditional negate. */
1328 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1331 z = tcg_const_i64(0);
1332 n = tcg_temp_new_i64();
1333 tcg_gen_neg_i64(n, o->in2);
1334 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1335 tcg_temp_free_i64(n);
1336 tcg_temp_free_i64(z);
/* FP absolute value: clear the sign bit (32/64/128-bit formats). */
1340 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1342 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1346 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1348 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1352 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1354 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1355 tcg_gen_mov_i64(o->out2, o->in2);
/* Integer add. */
1359 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1361 tcg_gen_add_i64(o->out, o->in1, o->in2);
/* Add with carry: recover the carry bit from the previous cc. */
1365 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1370 tcg_gen_add_i64(o->out, o->in1, o->in2);
1372 /* The carry flag is the msb of CC, therefore the branch mask that would
1373 create that comparison is 3. Feeding the generated comparison to
1374 setcond produces the carry flag that we desire. */
1375 disas_jcc(s, &cmp, 3);
1376 carry = tcg_temp_new_i64();
1378 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1380 TCGv_i32 t = tcg_temp_new_i32();
1381 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1382 tcg_gen_extu_i32_i64(carry, t);
1383 tcg_temp_free_i32(t);
1387 tcg_gen_add_i64(o->out, o->out, carry);
1388 tcg_temp_free_i64(carry);
/* FP adds delegate to helpers; 128-bit returns its low half in retxl. */
1392 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1394 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1398 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1400 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1404 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1406 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1407 return_low128(o->out2);
/* Bitwise AND. */
1411 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1413 tcg_gen_and_i64(o->out, o->in1, o->in2);
/* AND-immediate on a sub-field of the register: insn->data encodes
   the field's shift (low byte) and width (next byte). */
1417 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1419 int shift = s->insn->data & 0xff;
1420 int size = s->insn->data >> 8;
1421 uint64_t mask = ((1ull << size) - 1) << shift;
/* Move the immediate into position and set all other bits so the
   untouched part of the register passes through the AND. */
1424 tcg_gen_shli_i64(o->in2, o->in2, shift);
1425 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1426 tcg_gen_and_i64(o->out, o->in1, o->in2);
1428 /* Produce the CC from only the bits manipulated. */
1429 tcg_gen_andi_i64(cc_dst, o->out, mask);
1430 set_cc_nz_u64(s, cc_dst);
/* BASR-style branch-and-save: save link info, branch if r2 != 0. */
1434 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1436 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1437 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1438 tcg_gen_mov_i64(psw_addr, o->in2);
1439 return EXIT_PC_UPDATED;
/* BRAS/BRASL: save link info and branch pc-relative. */
1445 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1447 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1448 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
/* BC/BRC: branch on condition mask m1. */
1451 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1453 int m1 = get_field(s->fields, m1);
1454 bool is_imm = have_field(s->fields, i2);
1455 int imm = is_imm ? get_field(s->fields, i2) : 0;
1458 disas_jcc(s, &c, m1);
1459 return help_branch(s, &c, is_imm, imm, o->in2);
/* BCT/BCTR (32-bit): decrement r1's low word, branch while nonzero. */
1462 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1464 int r1 = get_field(s->fields, r1);
1465 bool is_imm = have_field(s->fields, i2);
1466 int imm = is_imm ? get_field(s->fields, i2) : 0;
1470 c.cond = TCG_COND_NE;
1475 t = tcg_temp_new_i64();
1476 tcg_gen_subi_i64(t, regs[r1], 1);
1477 store_reg32_i64(r1, t);
1478 c.u.s32.a = tcg_temp_new_i32();
1479 c.u.s32.b = tcg_const_i32(0);
1480 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1481 tcg_temp_free_i64(t);
1483 return help_branch(s, &c, is_imm, imm, o->in2);
/* BCTH: same, but on the high word of r1 (high-word facility). */
1486 static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
1488 int r1 = get_field(s->fields, r1);
1489 int imm = get_field(s->fields, i2);
1493 c.cond = TCG_COND_NE;
1498 t = tcg_temp_new_i64();
1499 tcg_gen_shri_i64(t, regs[r1], 32);
1500 tcg_gen_subi_i64(t, t, 1);
1501 store_reg32h_i64(r1, t);
1502 c.u.s32.a = tcg_temp_new_i32();
1503 c.u.s32.b = tcg_const_i32(0);
1504 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1505 tcg_temp_free_i64(t);
1507 return help_branch(s, &c, 1, imm, o->in2);
/* BCTG/BCTGR: 64-bit decrement-and-branch; compares the register
   global directly. */
1510 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1512 int r1 = get_field(s->fields, r1);
1513 bool is_imm = have_field(s->fields, i2);
1514 int imm = is_imm ? get_field(s->fields, i2) : 0;
1517 c.cond = TCG_COND_NE;
1522 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1523 c.u.s64.a = regs[r1];
1524 c.u.s64.b = tcg_const_i64(0);
1526 return help_branch(s, &c, is_imm, imm, o->in2);
/* BXH/BXLE (32-bit): r1 += r3, branch comparing against r3|1;
   insn->data distinguishes the high (GT) and low-or-equal (LE) forms. */
1529 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1531 int r1 = get_field(s->fields, r1);
1532 int r3 = get_field(s->fields, r3);
1533 bool is_imm = have_field(s->fields, i2);
1534 int imm = is_imm ? get_field(s->fields, i2) : 0;
1538 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1543 t = tcg_temp_new_i64();
1544 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1545 c.u.s32.a = tcg_temp_new_i32();
1546 c.u.s32.b = tcg_temp_new_i32();
1547 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1548 tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
1549 store_reg32_i64(r1, t);
1550 tcg_temp_free_i64(t);
1552 return help_branch(s, &c, is_imm, imm, o->in2);
/* BXHG/BXLEG: 64-bit variant; copy the comparand first when r1
   aliases r3|1, since the add below would clobber it. */
1555 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1557 int r1 = get_field(s->fields, r1);
1558 int r3 = get_field(s->fields, r3);
1559 bool is_imm = have_field(s->fields, i2);
1560 int imm = is_imm ? get_field(s->fields, i2) : 0;
1563 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1566 if (r1 == (r3 | 1)) {
1567 c.u.s64.b = load_reg(r3 | 1);
1570 c.u.s64.b = regs[r3 | 1];
1574 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1575 c.u.s64.a = regs[r1];
1578 return help_branch(s, &c, is_imm, imm, o->in2);
/* Compare-and-branch family (CRJ/CGRJ/CLRJ/...): condition comes from
   the ltgt table via m3; insn->data selects unsigned comparison. */
1581 static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1583 int imm, m3 = get_field(s->fields, m3);
1587 c.cond = ltgt_cond[m3];
1588 if (s->insn->data) {
1589 c.cond = tcg_unsigned_cond(c.cond);
1591 c.is_64 = c.g1 = c.g2 = true;
1595 is_imm = have_field(s->fields, i4);
1597 imm = get_field(s->fields, i4);
/* Register-target form: compute the branch address from b4/d4. */
1600 o->out = get_address(s, 0, get_field(s->fields, b4),
1601 get_field(s->fields, d4));
1604 return help_branch(s, &c, is_imm, imm, o->out);
/* FP compares: the helper computes the cc directly (set_cc_static
   calls and returns are elided in this excerpt). */
1607 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1609 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1614 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1616 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1621 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1623 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
/* Convert FP to signed 32-bit (CFEB/CFDB/CFXB): m3 carries the
   rounding mode; cc reflects the source value. */
1628 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1630 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1631 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1632 tcg_temp_free_i32(m3);
1633 gen_set_cc_nz_f32(s, o->in2);
1637 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1639 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1640 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1641 tcg_temp_free_i32(m3);
1642 gen_set_cc_nz_f64(s, o->in2);
1646 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1648 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1649 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1650 tcg_temp_free_i32(m3);
1651 gen_set_cc_nz_f128(s, o->in1, o->in2);
/* Convert FP to signed 64-bit (CGEB/CGDB/CGXB). */
1655 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1657 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1658 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1659 tcg_temp_free_i32(m3);
1660 gen_set_cc_nz_f32(s, o->in2);
1664 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1666 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1667 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1668 tcg_temp_free_i32(m3);
1669 gen_set_cc_nz_f64(s, o->in2);
1673 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1675 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1676 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1677 tcg_temp_free_i32(m3);
1678 gen_set_cc_nz_f128(s, o->in1, o->in2);
/* Convert FP to unsigned 32-bit (CLFEB/CLFDB). */
1682 static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1684 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1685 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1686 tcg_temp_free_i32(m3);
1687 gen_set_cc_nz_f32(s, o->in2);
1691 static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1693 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1694 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1695 tcg_temp_free_i32(m3);
1696 gen_set_cc_nz_f64(s, o->in2);
1700 static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1702 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1703 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1704 tcg_temp_free_i32(m3);
1705 gen_set_cc_nz_f128(s, o->in1, o->in2);
1709 static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1711 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1712 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1713 tcg_temp_free_i32(m3);
1714 gen_set_cc_nz_f32(s, o->in2);
1718 static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1720 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1721 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1722 tcg_temp_free_i32(m3);
1723 gen_set_cc_nz_f64(s, o->in2);
1727 static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1729 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1730 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1731 tcg_temp_free_i32(m3);
1732 gen_set_cc_nz_f128(s, o->in1, o->in2);
1736 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1738 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1739 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1740 tcg_temp_free_i32(m3);
1744 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1746 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1747 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1748 tcg_temp_free_i32(m3);
1752 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1754 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1755 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1756 tcg_temp_free_i32(m3);
1757 return_low128(o->out2);
1761 static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1763 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1764 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1765 tcg_temp_free_i32(m3);
1769 static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1771 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1772 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1773 tcg_temp_free_i32(m3);
1777 static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1779 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1780 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1781 tcg_temp_free_i32(m3);
1782 return_low128(o->out2);
/*
 * CHECKSUM. The helper computes the checksum over the buffer described by
 * in2 / regs[r2+1] and returns the number of bytes processed in 'len';
 * the R2 address/length pair is then advanced/decremented by that amount.
 */
1786 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1788 int r2 = get_field(s->fields, r2);
1789 TCGv_i64 len = tcg_temp_new_i64();
/* The helper may fault on the guest buffer; sync PC state first. */
1791 potential_page_fault(s);
1792 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1794 return_low128(o->out);
/* Advance the source address and shrink the remaining length. */
1796 tcg_gen_add_i64(regs[r2], regs[r2], len);
1797 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1798 tcg_temp_free_i64(len);
/*
 * COMPARE LOGICAL (character). For lengths that fit a single power-of-two
 * access (1/2/4/8 bytes) the comparison is inlined as two loads into
 * cc_src/cc_dst with a deferred LTUGTU_64 CC computation; otherwise the
 * generic helper is called with the length.
 * NOTE(review): the switch/case scaffolding is elided from this listing;
 * only the per-size load pairs and the helper fallback are visible.
 */
1803 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1805 int l = get_field(s->fields, l1);
/* 1-byte operands. */
1810 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1811 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
/* 2-byte operands. */
1814 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1815 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
/* 4-byte operands. */
1818 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1819 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
/* 8-byte operands. */
1822 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1823 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
/* General case: helper does the byte-wise compare and sets cc_op. */
1826 potential_page_fault(s);
1827 vl = tcg_const_i32(l);
1828 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1829 tcg_temp_free_i32(vl);
/* Inlined paths: record unsigned-compare CC from the two loaded values. */
1833 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
/* COMPARE LOGICAL LONG EXTENDED: helper-based, sets cc_op. */
1837 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1839 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1840 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1841 potential_page_fault(s);
1842 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1843 tcg_temp_free_i32(r1);
1844 tcg_temp_free_i32(r3);
/* COMPARE LOGICAL CHARACTERS UNDER MASK: low 32 bits of in1 are compared
   against storage bytes selected by M3; helper sets cc_op. */
1849 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1851 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1852 TCGv_i32 t1 = tcg_temp_new_i32();
1853 tcg_gen_trunc_i64_i32(t1, o->in1);
1854 potential_page_fault(s);
1855 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1857 tcg_temp_free_i32(t1);
1858 tcg_temp_free_i32(m3);
/* COMPARE LOGICAL STRING: helper returns updated addresses (in1 plus the
   low half via return_low128). Terminator byte comes from regs[0]. */
1862 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1864 potential_page_fault(s);
1865 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1867 return_low128(o->in2);
/* COPY SIGN: out = sign(in1) | magnitude(in2), done with bit masks. */
1871 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1873 TCGv_i64 t = tcg_temp_new_i64();
1874 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1875 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1876 tcg_gen_or_i64(o->out, o->out, t);
1877 tcg_temp_free_i64(t);
/*
 * COMPARE AND SWAP (CS/CSG, selected by insn->data as is_64).
 * Implemented non-atomically with load + setcond + movcond + store;
 * the FIXME below notes the missing atomicity for user-mode emulation.
 */
1881 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1883 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1884 int d2 = get_field(s->fields, d2);
1885 int b2 = get_field(s->fields, b2);
1886 int is_64 = s->insn->data;
1887 TCGv_i64 addr, mem, cc, z;
1889 /* Note that in1 = R3 (new value) and
1890 in2 = (zero-extended) R1 (expected value). */
1892 /* Load the memory into the (temporary) output. While the PoO only talks
1893 about moving the memory to R1 on inequality, if we include equality it
1894 means that R1 is equal to the memory in all conditions. */
1895 addr = get_address(s, 0, b2, d2);
1897 tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
1899 tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
1902 /* Are the memory and expected values (un)equal? Note that this setcond
1903 produces the output CC value, thus the NE sense of the test. */
1904 cc = tcg_temp_new_i64();
1905 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1907 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1908 Recall that we are allowed to unconditionally issue the store (and
1909 thus any possible write trap), so (re-)store the original contents
1910 of MEM in case of inequality. */
1911 z = tcg_const_i64(0);
1912 mem = tcg_temp_new_i64();
1913 tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
1915 tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
1917 tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
1919 tcg_temp_free_i64(z);
1920 tcg_temp_free_i64(mem);
1921 tcg_temp_free_i64(addr);
1923 /* Store CC back to cc_op. Wait until after the store so that any
1924 exception gets the old cc_op value. */
1925 tcg_gen_trunc_i64_i32(cc_op, cc);
1926 tcg_temp_free_i64(cc);
/*
 * COMPARE DOUBLE AND SWAP GRANDE: 128-bit compare-and-swap over the
 * even/odd register pairs R1:R1+1 (expected) and R3:R3+1 (new value).
 * Like op_cs, this is non-atomic (see FIXME).
 */
1931 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1933 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1934 int r1 = get_field(s->fields, r1);
1935 int r3 = get_field(s->fields, r3);
1936 int d2 = get_field(s->fields, d2);
1937 int b2 = get_field(s->fields, b2);
1938 TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
1940 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
/* Two doubleword addresses: base and base+8. */
1942 addrh = get_address(s, 0, b2, d2);
1943 addrl = get_address(s, 0, b2, d2 + 8);
1944 outh = tcg_temp_new_i64();
1945 outl = tcg_temp_new_i64();
1947 tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
1948 tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
1950 /* Fold the double-word compare with arithmetic. */
/* cc = (outh^R1) | (outl^R1+1) != 0, i.e. 1 iff the 128-bit values differ. */
1951 cc = tcg_temp_new_i64();
1952 z = tcg_temp_new_i64();
1953 tcg_gen_xor_i64(cc, outh, regs[r1]);
1954 tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
1955 tcg_gen_or_i64(cc, cc, z);
1956 tcg_gen_movi_i64(z, 0);
1957 tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
/* Select new value on equality, else re-store the original memory. */
1959 memh = tcg_temp_new_i64();
1960 meml = tcg_temp_new_i64();
1961 tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
1962 tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
1963 tcg_temp_free_i64(z);
1965 tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
1966 tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
1967 tcg_temp_free_i64(memh);
1968 tcg_temp_free_i64(meml);
1969 tcg_temp_free_i64(addrh);
1970 tcg_temp_free_i64(addrl);
1972 /* Save back state now that we've passed all exceptions. */
1973 tcg_gen_mov_i64(regs[r1], outh);
1974 tcg_gen_mov_i64(regs[r1 + 1], outl);
1975 tcg_gen_trunc_i64_i32(cc_op, cc);
1976 tcg_temp_free_i64(outh);
1977 tcg_temp_free_i64(outl);
1978 tcg_temp_free_i64(cc);
1983 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE: privileged; helper sets cc_op. */
1984 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
1986 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1987 check_privileged(s);
1988 gen_helper_csp(cc_op, cpu_env, r1, o->in2);
1989 tcg_temp_free_i32(r1);
/* CONVERT TO DECIMAL: helper converts the 32-bit value in in1 to a packed
   decimal doubleword, which is then stored at the in2 address. */
1995 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
1997 TCGv_i64 t1 = tcg_temp_new_i64();
1998 TCGv_i32 t2 = tcg_temp_new_i32();
1999 tcg_gen_trunc_i64_i32(t2, o->in1);
2000 gen_helper_cvd(t1, t2);
2001 tcg_temp_free_i32(t2);
2002 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2003 tcg_temp_free_i64(t1);
/* COMPARE AND TRAP family: branch over the trap when the inverted M3
   condition holds; insn->data selects the unsigned (logical) variants.
   NOTE(review): the trap-raising lines after the brcond are elided here. */
2007 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
2009 int m3 = get_field(s->fields, m3);
2010 TCGLabel *lab = gen_new_label();
2013 c = tcg_invert_cond(ltgt_cond[m3]);
2014 if (s->insn->data) {
2015 c = tcg_unsigned_cond(c);
2017 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2026 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE: privileged hypercall-style instruction; R1/R3 and the I2
   function code are passed to the helper. */
2027 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2029 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2030 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2031 TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2033 check_privileged(s);
2037 gen_helper_diag(cpu_env, r1, r3, func_code);
2039 tcg_temp_free_i32(func_code);
2040 tcg_temp_free_i32(r3);
2041 tcg_temp_free_i32(r1);
/* Signed/unsigned 32- and 64-bit divides. The helper returns the quotient
   via return_low128 and the remainder in out2 (helper raises the divide
   exception itself). */
2046 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2048 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2049 return_low128(o->out);
2053 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2055 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2056 return_low128(o->out);
2060 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2062 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2063 return_low128(o->out);
/* 128/64 unsigned divide: dividend is the out/out2 pair. */
2067 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2069 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2070 return_low128(o->out);
/* BFP divides (short/long/extended). */
2074 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2076 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2080 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2082 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2086 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2088 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2089 return_low128(o->out2);
/* EXTRACT ACCESS REGISTER: read access register R2 from env. */
2093 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2095 int r2 = get_field(s->fields, r2);
2096 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
/* EXTRACT CPU ATTRIBUTE: stubbed -- report "no cache information". */
2100 static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
2102 /* No cache information provided. */
2103 tcg_gen_movi_i64(o->out, -1);
/* EXTRACT FPC. */
2107 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2109 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
/* EXTRACT PSW: R1 gets the high half of the PSW mask, R2 (if used) the
   low half; written immediately because R1 may equal R2. */
2113 static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
2115 int r1 = get_field(s->fields, r1);
2116 int r2 = get_field(s->fields, r2);
2117 TCGv_i64 t = tcg_temp_new_i64();
2119 /* Note the "subsequently" in the PoO, which implies a defined result
2120 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2121 tcg_gen_shri_i64(t, psw_mask, 32);
2122 store_reg32_i64(r1, t);
2124 store_reg32_i64(r2, psw_mask);
2127 tcg_temp_free_i64(t);
/* EXECUTE: handled entirely by a helper, passing the address of the next
   sequential instruction. */
2131 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2133 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2134 tb->flags, (ab)use the tb->cs_base field as the address of
2135 the template in memory, and grab 8 bits of tb->flags/cflags for
2136 the contents of the register. We would then recognize all this
2137 in gen_intermediate_code_internal, generating code for exactly
2138 one instruction. This new TB then gets executed normally.
2140 On the other hand, this seems to be mostly used for modifying
2141 MVC inside of memcpy, which needs a helper call anyway. So
2142 perhaps this doesn't bear thinking about any further. */
2149 tmp = tcg_const_i64(s->next_pc);
2150 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2151 tcg_temp_free_i64(tmp);
/* LOAD FP INTEGER (round to integer-valued float), short/long/extended;
   M3 carries the rounding mode. */
2156 static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
2158 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2159 gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2160 tcg_temp_free_i32(m3);
2164 static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
2166 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2167 gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2168 tcg_temp_free_i32(m3);
2172 static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
2174 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2175 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2176 return_low128(o->out2);
2177 tcg_temp_free_i32(m3);
/* FIND LEFTMOST ONE: R1 = bit position count (64 for zero input),
   R1+1 = input with the found bit cleared. */
2181 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2183 /* We'll use the original input for cc computation, since we get to
2184 compare that against 0, which ought to be better than comparing
2185 the real output against 64. It also lets cc_dst be a convenient
2186 temporary during our computation. */
2187 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2189 /* R1 = IN ? CLZ(IN) : 64. */
2190 gen_helper_clz(o->out, o->in2);
2192 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2193 value by 64, which is undefined. But since the shift is 64 iff the
2194 input is zero, we still get the correct result after and'ing. */
2195 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2196 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2197 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
/*
 * INSERT CHARACTERS UNDER MASK. Contiguous masks (0xf/0xc/0x3/0x8 style)
 * become a single power-of-two load plus deposit; otherwise a byte-by-byte
 * load/deposit loop is emitted. CC is computed from the inserted bits via
 * CC_OP_ICM. 'base' (insn->data) selects the low/high word of the target.
 * NOTE(review): the switch/case and loop scaffolding between the numbered
 * lines is elided from this listing.
 */
2201 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2203 int m3 = get_field(s->fields, m3);
2204 int pos, len, base = s->insn->data;
2205 TCGv_i64 tmp = tcg_temp_new_i64();
2210 /* Effectively a 32-bit load. */
2211 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2218 /* Effectively a 16-bit load. */
2219 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2227 /* Effectively an 8-bit load. */
2228 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
/* Deposit the loaded bytes at the mask's position within the word. */
2233 pos = base + ctz32(m3) * 8;
2234 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2235 ccm = ((1ull << len) - 1) << pos;
2239 /* This is going to be a sequence of loads and inserts. */
2240 pos = base + 32 - 8;
2244 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2245 tcg_gen_addi_i64(o->in2, o->in2, 1);
2246 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
/* Shift the mask left to examine the next selector bit. */
2249 m3 = (m3 << 1) & 0xf;
/* CC from the mask of inserted bits and the updated register. */
2255 tcg_gen_movi_i64(tmp, ccm);
2256 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2257 tcg_temp_free_i64(tmp);
/* Generic bit-field insert driven by the insn table: low byte of
   insn->data is the shift, high bits the field size. */
2261 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2263 int shift = s->insn->data & 0xff;
2264 int size = s->insn->data >> 8;
2265 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
/* INSERT PROGRAM MASK: build bits 24-31 of R1 from the PSW program mask
   and the current CC. */
2269 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2274 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
/* Program mask: psw_mask bits extracted via shift-left 20 / shift-right 36. */
2276 t1 = tcg_temp_new_i64();
2277 tcg_gen_shli_i64(t1, psw_mask, 20);
2278 tcg_gen_shri_i64(t1, t1, 36);
2279 tcg_gen_or_i64(o->out, o->out, t1);
/* CC goes into bits 28-29. */
2281 tcg_gen_extu_i32_i64(t1, cc_op);
2282 tcg_gen_shli_i64(t1, t1, 28);
2283 tcg_gen_or_i64(o->out, o->out, t1);
2284 tcg_temp_free_i64(t1);
2288 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY: privileged, helper-based. */
2289 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2291 check_privileged(s);
2292 gen_helper_ipte(cpu_env, o->in1, o->in2);
/* INSERT STORAGE KEY EXTENDED: privileged, helper-based. */
2296 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2298 check_privileged(s);
2299 gen_helper_iske(o->out, cpu_env, o->in2);
/* BFP widen/narrow conversions between short, long and extended formats. */
2304 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2306 gen_helper_ldeb(o->out, cpu_env, o->in2);
2310 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2312 gen_helper_ledb(o->out, cpu_env, o->in2);
2316 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2318 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2322 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2324 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2328 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2330 gen_helper_lxdb(o->out, cpu_env, o->in2);
2331 return_low128(o->out2);
2335 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2337 gen_helper_lxeb(o->out, cpu_env, o->in2);
2338 return_low128(o->out2);
/* LOAD 31-bit: mask to the low 31 bits. */
2342 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2344 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
/* Simple memory loads of each width/signedness into out. */
2348 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2350 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2354 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2356 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2360 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2362 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2366 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2368 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2372 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2374 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2378 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2380 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2384 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2386 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
/*
 * LOAD AND TRAP family: store/load the value, then branch over the trap
 * when the value is nonzero.
 * NOTE(review): the trap-generation lines after each brcondi are elided
 * from this listing.
 */
2390 static ExitStatus op_lat(DisasContext *s, DisasOps *o)
2392 TCGLabel *lab = gen_new_label();
2393 store_reg32_i64(get_field(s->fields, r1), o->in2);
2394 /* The value is stored even in case of trap. */
2395 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2401 static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
2403 TCGLabel *lab = gen_new_label();
2404 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2405 /* The value is stored even in case of trap. */
2406 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2412 static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
2414 TCGLabel *lab = gen_new_label();
2415 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2416 /* The value is stored even in case of trap. */
2417 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2423 static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
2425 TCGLabel *lab = gen_new_label();
2426 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2427 /* The value is stored even in case of trap. */
2428 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2434 static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
2436 TCGLabel *lab = gen_new_label();
2437 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2438 /* The value is stored even in case of trap. */
2439 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
/* LOAD ON CONDITION: movcond when the CC test is already 64-bit, else
   materialize the 32-bit setcond, widen it, and movcond against zero. */
2445 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2449 disas_jcc(s, &c, get_field(s->fields, m3));
2452 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2456 TCGv_i32 t32 = tcg_temp_new_i32();
2459 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2462 t = tcg_temp_new_i64();
2463 tcg_gen_extu_i32_i64(t, t32);
2464 tcg_temp_free_i32(t32);
2466 z = tcg_const_i64(0);
2467 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2468 tcg_temp_free_i64(t);
2469 tcg_temp_free_i64(z);
2475 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): privileged helper loads control registers R1..R3. */
2476 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2478 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2479 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2480 check_privileged(s);
2481 potential_page_fault(s);
2482 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2483 tcg_temp_free_i32(r1);
2484 tcg_temp_free_i32(r3);
/* LOAD CONTROL (64-bit). */
2488 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2490 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2491 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2492 check_privileged(s);
2493 potential_page_fault(s);
2494 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2495 tcg_temp_free_i32(r1);
2496 tcg_temp_free_i32(r3);
/* LOAD REAL ADDRESS: privileged helper performs the translation. */
2499 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2501 check_privileged(s);
2502 potential_page_fault(s);
2503 gen_helper_lra(o->out, cpu_env, o->in2);
/* LOAD PSW (short format): load two 32-bit words, widen the mask word to
   the 64-bit PSW layout, and hand both to the load_psw helper. Control
   flow does not continue past a PSW load. */
2508 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2512 check_privileged(s);
2514 t1 = tcg_temp_new_i64();
2515 t2 = tcg_temp_new_i64();
2516 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2517 tcg_gen_addi_i64(o->in2, o->in2, 4);
2518 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2519 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2520 tcg_gen_shli_i64(t1, t1, 32);
2521 gen_helper_load_psw(cpu_env, t1, t2);
2522 tcg_temp_free_i64(t1);
2523 tcg_temp_free_i64(t2);
2524 return EXIT_NORETURN;
/* LOAD PSW EXTENDED: same shape with two 64-bit doublewords. */
2527 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2531 check_privileged(s);
2533 t1 = tcg_temp_new_i64();
2534 t2 = tcg_temp_new_i64();
2535 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2536 tcg_gen_addi_i64(o->in2, o->in2, 8);
2537 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2538 gen_helper_load_psw(cpu_env, t1, t2);
2539 tcg_temp_free_i64(t1);
2540 tcg_temp_free_i64(t2);
2541 return EXIT_NORETURN;
/* LOAD ACCESS MULTIPLE: helper loads access registers R1..R3 from in2. */
2545 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2547 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2548 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2549 potential_page_fault(s);
2550 gen_helper_lam(cpu_env, r1, o->in2, r3);
2551 tcg_temp_free_i32(r1);
2552 tcg_temp_free_i32(r3);
/*
 * LOAD MULTIPLE (32-bit). The first and last registers are loaded first so
 * that any page fault is taken before any register is modified; the middle
 * registers are then loaded in a loop that can no longer fault.
 * NOTE(review): the early returns, else branches and loop heads between
 * the numbered lines are elided from this listing.
 */
2556 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2558 int r1 = get_field(s->fields, r1);
2559 int r3 = get_field(s->fields, r3);
2562 /* Only one register to read. */
2563 t1 = tcg_temp_new_i64();
2564 if (unlikely(r1 == r3)) {
2565 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2566 store_reg32_i64(r1, t1);
2571 /* First load the values of the first and last registers to trigger
2572 possible page faults. */
2573 t2 = tcg_temp_new_i64();
2574 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
/* Last register sits 4 * ((r3 - r1) mod 16) bytes past the base. */
2575 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2576 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2577 store_reg32_i64(r1, t1);
2578 store_reg32_i64(r3, t2);
2580 /* Only two registers to read. */
2581 if (((r1 + 1) & 15) == r3) {
2587 /* Then load the remaining registers. Page fault can't occur. */
2589 tcg_gen_movi_i64(t2, 4);
2592 tcg_gen_add_i64(o->in2, o->in2, t2);
2593 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2594 store_reg32_i64(r1, t1);
/*
 * LOAD MULTIPLE HIGH: identical structure, but the 32-bit values go into
 * the high halves of the registers (store_reg32h_i64).
 */
2602 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2604 int r1 = get_field(s->fields, r1);
2605 int r3 = get_field(s->fields, r3);
2608 /* Only one register to read. */
2609 t1 = tcg_temp_new_i64();
2610 if (unlikely(r1 == r3)) {
2611 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2612 store_reg32h_i64(r1, t1);
2617 /* First load the values of the first and last registers to trigger
2618 possible page faults. */
2619 t2 = tcg_temp_new_i64();
2620 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2621 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2622 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2623 store_reg32h_i64(r1, t1);
2624 store_reg32h_i64(r3, t2);
2626 /* Only two registers to read. */
2627 if (((r1 + 1) & 15) == r3) {
2633 /* Then load the remaining registers. Page fault can't occur. */
2635 tcg_gen_movi_i64(t2, 4);
2638 tcg_gen_add_i64(o->in2, o->in2, t2);
2639 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2640 store_reg32h_i64(r1, t1);
/*
 * LOAD MULTIPLE (64-bit): same fault-first structure as op_lm32, with
 * 8-byte strides and direct writes to the 64-bit register file.
 * NOTE(review): returns / loop heads between the numbered lines are
 * elided from this listing.
 */
2648 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2650 int r1 = get_field(s->fields, r1);
2651 int r3 = get_field(s->fields, r3);
2654 /* Only one register to read. */
2655 if (unlikely(r1 == r3)) {
2656 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2660 /* First load the values of the first and last registers to trigger
2661 possible page faults. */
2662 t1 = tcg_temp_new_i64();
2663 t2 = tcg_temp_new_i64();
2664 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2665 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2666 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
/* Commit the first register only after both boundary loads succeeded. */
2667 tcg_gen_mov_i64(regs[r1], t1);
2670 /* Only two registers to read. */
2671 if (((r1 + 1) & 15) == r3) {
2676 /* Then load the remaining registers. Page fault can't occur. */
2678 tcg_gen_movi_i64(t1, 8);
2681 tcg_gen_add_i64(o->in2, o->in2, t1);
2682 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2689 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS (32- and 64-bit): privileged helper loads. */
2690 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2692 check_privileged(s);
2693 potential_page_fault(s);
2694 gen_helper_lura(o->out, cpu_env, o->in2);
2698 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2700 check_privileged(s);
2701 potential_page_fault(s);
2702 gen_helper_lurag(o->out, cpu_env, o->in2);
/* Generic move: hand in2 straight through as the output, transferring
   the "is a global TCG value" flag and clearing in2 so the generic
   cleanup code does not free it twice. */
2707 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2710 o->g_out = o->g_in2;
2711 TCGV_UNUSED_I64(o->in2);
/* Move variant that additionally sets access register 1 according to the
   current address-space-control mode of the PSW (primary/AR/secondary/
   home), writing the chosen ALET into aregs[1]. */
2716 static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
2718 int b2 = get_field(s->fields, b2);
2719 TCGv ar1 = tcg_temp_new_i64();
2722 o->g_out = o->g_in2;
2723 TCGV_UNUSED_I64(o->in2);
2726 switch (s->tb->flags & FLAG_MASK_ASC) {
2727 case PSW_ASC_PRIMARY >> 32:
2728 tcg_gen_movi_i64(ar1, 0);
2730 case PSW_ASC_ACCREG >> 32:
2731 tcg_gen_movi_i64(ar1, 1);
2733 case PSW_ASC_SECONDARY >> 32:
/* With a base register, copy its access register; else default to 0. */
2735 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
2737 tcg_gen_movi_i64(ar1, 0);
2740 case PSW_ASC_HOME >> 32:
2741 tcg_gen_movi_i64(ar1, 2);
2745 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
2746 tcg_temp_free_i64(ar1);
/* 128-bit move: pass both input halves through as the output pair. */
2751 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2755 o->g_out = o->g_in1;
2756 o->g_out2 = o->g_in2;
2757 TCGV_UNUSED_I64(o->in1);
2758 TCGV_UNUSED_I64(o->in2);
2759 o->g_in1 = o->g_in2 = false;
/* MOVE (character): helper-based block move of L1+1 bytes. */
2763 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2765 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2766 potential_page_fault(s);
2767 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2768 tcg_temp_free_i32(l);
/* MOVE LONG: helper operates on the R1/R2 register pairs, sets cc_op. */
2772 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2774 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2775 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2776 potential_page_fault(s);
2777 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2778 tcg_temp_free_i32(r1);
2779 tcg_temp_free_i32(r2);
/* MOVE LONG EXTENDED. */
2784 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2786 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2787 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2788 potential_page_fault(s);
2789 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2790 tcg_temp_free_i32(r1);
2791 tcg_temp_free_i32(r3);
2796 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY / MOVE TO SECONDARY: privileged cross-space moves.
   Note the length register index comes from the l1 field here. */
2797 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2799 int r1 = get_field(s->fields, l1);
2800 check_privileged(s);
2801 potential_page_fault(s);
2802 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2807 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2809 int r1 = get_field(s->fields, l1);
2810 check_privileged(s);
2811 potential_page_fault(s);
2812 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
/* MOVE PAGE: helper-based; regs[0] carries the option bits. */
2818 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2820 potential_page_fault(s);
2821 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
/* MOVE STRING: helper returns updated destination (in1) and source
   (low 128 half into in2); regs[0] holds the terminator byte. */
2826 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2828 potential_page_fault(s);
2829 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2831 return_low128(o->in2);
/* MULTIPLY: plain 64-bit multiply. */
2835 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2837 tcg_gen_mul_i64(o->out, o->in1, o->in2);
/* MULTIPLY LOGICAL: full 64x64->128 product into the out2/out pair. */
2841 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2843 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
/* BFP multiplies (short/long/long-from-short/extended/extended-from-long). */
2847 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2849 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2853 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2855 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2859 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2861 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2865 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2867 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2868 return_low128(o->out2);
2872 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2874 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2875 return_low128(o->out2);
/* BFP fused multiply-add/-subtract: R3 is the addend/subtrahend operand;
   the short form loads it as a 32-bit FP value, the long form passes the
   FP register directly. */
2879 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2881 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2882 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2883 tcg_temp_free_i64(r3);
2887 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2889 int r3 = get_field(s->fields, r3);
2890 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2894 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2896 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2897 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2898 tcg_temp_free_i64(r3);
2902 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2904 int r3 = get_field(s->fields, r3);
2905 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
/* LOAD NEGATIVE (integer): out = -(|in2|), via neg + movcond. */
2909 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2912 z = tcg_const_i64(0);
2913 n = tcg_temp_new_i64();
2914 tcg_gen_neg_i64(n, o->in2);
2915 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
2916 tcg_temp_free_i64(n);
2917 tcg_temp_free_i64(z);
/* LOAD NEGATIVE (BFP): force the sign bit on; 32-bit value is kept in the
   high-half FP register layout, hence the 0x80000000 mask for f32. */
2921 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2923 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2927 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2929 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2933 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2935 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2936 tcg_gen_mov_i64(o->out2, o->in2);
2940 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2942 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2943 potential_page_fault(s);
2944 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2945 tcg_temp_free_i32(l);
2950 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2952 tcg_gen_neg_i64(o->out, o->in2);
2956 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2958 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2962 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2964 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
2968 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2970 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2971 tcg_gen_mov_i64(o->out2, o->in2);
2975 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2977 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2978 potential_page_fault(s);
2979 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2980 tcg_temp_free_i32(l);
2985 static ExitStatus op_or(DisasContext *s, DisasOps *o)
2987 tcg_gen_or_i64(o->out, o->in1, o->in2);
2991 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2993 int shift = s->insn->data & 0xff;
2994 int size = s->insn->data >> 8;
2995 uint64_t mask = ((1ull << size) - 1) << shift;
2998 tcg_gen_shli_i64(o->in2, o->in2, shift);
2999 tcg_gen_or_i64(o->out, o->in1, o->in2);
3001 /* Produce the CC from only the bits manipulated. */
3002 tcg_gen_andi_i64(cc_dst, o->out, mask);
3003 set_cc_nz_u64(s, cc_dst);
3007 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3009 gen_helper_popcnt(o->out, o->in2);
3013 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; flush handled entirely by the helper. */
3014 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3016 check_privileged(s);
3017 gen_helper_ptlb(cpu_env);
/* ROTATE THEN INSERT SELECTED BITS (RISBG / RISBHG / RISBLG).
   I3/I4 select the destination bit field, I5 is the rotate amount, and
   bit 0x80 of I4 requests zeroing of the unselected bits.  PMASK limits
   the operation to the high (risbhg) or low (risblg) register half.
   NOTE(review): this view elides several lines (mask seeding, deposit
   pos/len computation, the final branch structure) — the comments below
   describe only what is visible. */
3022 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3024 int i3 = get_field(s->fields, i3);
3025 int i4 = get_field(s->fields, i4);
3026 int i5 = get_field(s->fields, i5);
3027 int do_zero = i4 & 0x80;
3028 uint64_t mask, imask, pmask;
3031 /* Adjust the arguments for the specific insn. */
3032 switch (s->fields->op2) {
3033 case 0x55: /* risbg */
3038 case 0x5d: /* risbhg */
3041 pmask = 0xffffffff00000000ull;
3043 case 0x51: /* risblg */
3046 pmask = 0x00000000ffffffffull;
3052 /* MASK is the set of bits to be inserted from R2.
3053 Take care for I3/I4 wraparound. */
3056 mask ^= pmask >> i4 >> 1;
3058 mask |= ~(pmask >> i4 >> 1);
3062 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3063 insns, we need to keep the other half of the register. */
3064 imask = ~mask | ~pmask;
3066 if (s->fields->op2 == 0x55) {
3073 /* In some cases we can implement this with deposit, which can be more
3074 efficient on some hosts. */
3075 if (~mask == imask && i3 <= i4) {
3076 if (s->fields->op2 == 0x5d) {
3079 /* Note that we rotate the bits to be inserted to the lsb, not to
3080 the position as described in the PoO. */
3083 rot = (i5 - pos) & 63;
3089 /* Rotate the input as necessary. */
3090 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3092 /* Insert the selected bits into the output. */
3094 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
/* Fallback paths: pure mask when nothing from R1 survives, otherwise
   mask both halves and OR them together. */
3095 } else if (imask == 0) {
3096 tcg_gen_andi_i64(o->out, o->in2, mask);
3098 tcg_gen_andi_i64(o->in2, o->in2, mask);
3099 tcg_gen_andi_i64(o->out, o->out, imask);
3100 tcg_gen_or_i64(o->out, o->out, o->in2);
/* ROTATE THEN <AND/OR/XOR> SELECTED BITS (RNSBG / ROSBG / RXSBG).
   Rotate R2 left by I5, combine the bits selected by I3..I4 into R1 with
   the boolean op chosen by op2, and set the CC from the affected bits.
   NOTE(review): mask seeding, the test-only-bit check, and the OR case
   label are elided from this view. */
3105 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3107 int i3 = get_field(s->fields, i3);
3108 int i4 = get_field(s->fields, i4);
3109 int i5 = get_field(s->fields, i5);
3112 /* If this is a test-only form, arrange to discard the result. */
3114 o->out = tcg_temp_new_i64();
3122 /* MASK is the set of bits to be operated on from R2.
3123 Take care for I3/I4 wraparound. */
3126 mask ^= ~0ull >> i4 >> 1;
3128 mask |= ~(~0ull >> i4 >> 1);
3131 /* Rotate the input as necessary. */
3132 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3135 switch (s->fields->op2) {
3136 case 0x55: /* AND */
/* For AND, force the unselected bits of in2 to 1 so they pass R1 through. */
3137 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3138 tcg_gen_and_i64(o->out, o->out, o->in2);
/* For OR/XOR, clear the unselected bits of in2 so they leave R1 alone. */
3141 tcg_gen_andi_i64(o->in2, o->in2, mask);
3142 tcg_gen_or_i64(o->out, o->out, o->in2);
3144 case 0x57: /* XOR */
3145 tcg_gen_andi_i64(o->in2, o->in2, mask);
3146 tcg_gen_xor_i64(o->out, o->out, o->in2);
/* CC is computed from the selected bits only. */
3153 tcg_gen_andi_i64(cc_dst, o->out, mask);
3154 set_cc_nz_u64(s, cc_dst);
/* Byte-swap 16/32/64-bit values (LRVR family). */
3158 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3160 tcg_gen_bswap16_i64(o->out, o->in2);
3164 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3166 tcg_gen_bswap32_i64(o->out, o->in2);
3170 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3172 tcg_gen_bswap64_i64(o->out, o->in2);
/* ROTATE LEFT SINGLE LOGICAL (32-bit): do the rotate in i32 temps, then
   zero-extend the result back to the i64 output. */
3176 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3178 TCGv_i32 t1 = tcg_temp_new_i32();
3179 TCGv_i32 t2 = tcg_temp_new_i32();
3180 TCGv_i32 to = tcg_temp_new_i32();
3181 tcg_gen_trunc_i64_i32(t1, o->in1);
3182 tcg_gen_trunc_i64_i32(t2, o->in2);
3183 tcg_gen_rotl_i32(to, t1, t2);
3184 tcg_gen_extu_i32_i64(o->out, to);
3185 tcg_temp_free_i32(t1);
3186 tcg_temp_free_i32(t2);
3187 tcg_temp_free_i32(to);
/* ROTATE LEFT SINGLE LOGICAL (64-bit). */
3191 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3193 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3197 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; helper sets the CC. */
3198 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3200 check_privileged(s);
3201 gen_helper_rrbe(cc_op, cpu_env, o->in2);
/* SET ADDRESS SPACE CONTROL FAST: privileged; the address space switch is
   done in the helper, so end the TB afterwards. */
3206 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3208 check_privileged(s);
3209 gen_helper_sacf(cpu_env, o->in2);
3210 /* Addressing mode has changed, so end the block. */
3211 return EXIT_PC_STALE;
/* SET ADDRESSING MODE: insn->data carries the new mode bits for the PSW
   (deposited at bit 31, width 2).  NOTE(review): the computation of
   `mask` from `sam` is elided in this view. */
3215 static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3217 int sam = s->insn->data;
3233 /* Bizarre but true, we check the address of the current insn for the
3234 specification exception, not the next to be executed. Thus the PoO
3235 documents that Bad Things Happen two bytes before the end. */
3236 if (s->pc & ~mask) {
3237 gen_program_exception(s, PGM_SPECIFICATION);
3238 return EXIT_NORETURN;
3242 tsam = tcg_const_i64(sam);
3243 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3244 tcg_temp_free_i64(tsam);
3246 /* Always exit the TB, since we (may have) changed execution mode. */
3247 return EXIT_PC_STALE;
/* SET ACCESS REGISTER: store the low 32 bits of in2 into aregs[r1]. */
3250 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3252 int r1 = get_field(s->fields, r1);
3253 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
/* BFP SUBTRACT, short/long/extended — all delegated to helpers. */
3257 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3259 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3263 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3265 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
/* 128-bit subtract: helper returns the high half; the low half comes
   back through return_low128. */
3269 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3271 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3272 return_low128(o->out2);
/* BFP SQUARE ROOT, short/long/extended. */
3276 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3278 gen_helper_sqeb(o->out, cpu_env, o->in2);
3282 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3284 gen_helper_sqdb(o->out, cpu_env, o->in2);
3288 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3290 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3291 return_low128(o->out2);
3295 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): privileged, may fault on the SCCB access;
   helper sets the CC. */
3296 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3298 check_privileged(s);
3299 potential_page_fault(s);
3300 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
/* SIGNAL PROCESSOR: privileged inter-CPU order; helper sets the CC. */
3305 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3307 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3308 check_privileged(s);
3309 potential_page_fault(s);
3310 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
3311 tcg_temp_free_i32(r1);
/* STORE ON CONDITION (STOC/STOCG): evaluate the m3 condition, branch
   around the store when it is NOT met.  insn->data selects 64- vs 32-bit
   store.  NOTE(review): the 64/32 comparison-type test and label
   placement are elided in this view. */
3316 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3323 disas_jcc(s, &c, get_field(s->fields, m3));
3325 /* We want to store when the condition is fulfilled, so branch
3326 out when it's not */
3327 c.cond = tcg_invert_cond(c.cond);
3329 lab = gen_new_label();
3331 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3333 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3337 r1 = get_field(s->fields, r1);
3338 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3339 if (s->insn->data) {
3340 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3342 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3344 tcg_temp_free_i64(a);
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit position
   (31 or 63).  CC is computed from the pre-shift operands. */
3350 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3352 uint64_t sign = 1ull << s->insn->data;
3353 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3354 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3355 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3356 /* The arithmetic left shift is curious in that it does not affect
3357 the sign bit. Copy that over from the source unchanged. */
3358 tcg_gen_andi_i64(o->out, o->out, ~sign);
3359 tcg_gen_andi_i64(o->in1, o->in1, sign);
3360 tcg_gen_or_i64(o->out, o->out, o->in1);
/* Plain logical/arithmetic shifts. */
3364 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3366 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3370 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3372 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3376 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3378 tcg_gen_shr_i64(o->out, o->in1, o->in2);
/* SET FPC / SET FPC AND SIGNAL — FP control register updates via helper. */
3382 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3384 gen_helper_sfpc(cpu_env, o->in2);
3388 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3390 gen_helper_sfas(cpu_env, o->in2);
/* SET ROUNDING MODE (SRNM / SRNMB / SRNMT): compute the new mode from
   b2+d2, deposit it into the proper FPC field (pos/len chosen per op2),
   then reinstall the FPC via helper so fpu_status picks it up.
   NOTE(review): the per-opcode pos/len assignments and the b2==0 test
   are elided in this view. */
3394 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3396 int b2 = get_field(s->fields, b2);
3397 int d2 = get_field(s->fields, d2);
3398 TCGv_i64 t1 = tcg_temp_new_i64();
3399 TCGv_i64 t2 = tcg_temp_new_i64();
3402 switch (s->fields->op2) {
3403 case 0x99: /* SRNM */
3406 case 0xb8: /* SRNMB */
3409 case 0xb9: /* SRNMT */
3415 mask = (1 << len) - 1;
3417 /* Insert the value into the appropriate field of the FPC. */
/* b2 == 0 means an immediate mode; otherwise base + displacement. */
3419 tcg_gen_movi_i64(t1, d2 & mask);
3421 tcg_gen_addi_i64(t1, regs[b2], d2);
3422 tcg_gen_andi_i64(t1, t1, mask);
3424 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3425 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3426 tcg_temp_free_i64(t1);
3428 /* Then install the new FPC to set the rounding mode in fpu_status. */
3429 gen_helper_sfpc(cpu_env, t2);
3430 tcg_temp_free_i64(t2);
3434 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; bits 24-27 of the operand become
   the 4-bit PSW key. */
3435 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3437 check_privileged(s);
3438 tcg_gen_shri_i64(o->in2, o->in2, 4);
3439 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
/* SET STORAGE KEY EXTENDED: privileged; storage-key write in helper. */
3443 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3445 check_privileged(s);
3446 gen_helper_sske(cpu_env, o->in1, o->in2);
/* SET SYSTEM MASK: privileged; replace PSW bits 0-7. */
3450 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3452 check_privileged(s);
3453 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
/* STORE CPU ADDRESS: privileged. */
3457 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3459 check_privileged(s);
3460 /* ??? Surely cpu address != cpu number. In any case the previous
3461 version of this stored more than the required half-word, so it
3462 is unlikely this has ever been tested. */
3463 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
/* STORE CLOCK: helper produces the TOD value; CC forced to 0. */
3467 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3469 gen_helper_stck(o->out, cpu_env);
3470 /* ??? We don't implement clock states. */
3471 gen_op_movi_cc(s, 0);
/* STORE CLOCK EXTENDED: store the 64-bit clock as a 16-byte value —
   high 8 bytes are clock>>8, low 8 bytes are clock<<56 with a non-zero
   marker bit, per the comment below. */
3475 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3477 TCGv_i64 c1 = tcg_temp_new_i64();
3478 TCGv_i64 c2 = tcg_temp_new_i64();
3479 gen_helper_stck(c1, cpu_env);
3480 /* Shift the 64-bit value into its place as a zero-extended
3481 104-bit value. Note that "bit positions 64-103 are always
3482 non-zero so that they compare differently to STCK"; we set
3483 the least significant bit to 1. */
3484 tcg_gen_shli_i64(c2, c1, 56);
3485 tcg_gen_shri_i64(c1, c1, 8);
3486 tcg_gen_ori_i64(c2, c2, 0x10000);
3487 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3488 tcg_gen_addi_i64(o->in2, o->in2, 8);
3489 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3490 tcg_temp_free_i64(c1);
3491 tcg_temp_free_i64(c2);
3492 /* ??? We don't implement clock states. */
3493 gen_op_movi_cc(s, 0);
/* SET CLOCK COMPARATOR: privileged. */
3497 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3499 check_privileged(s);
3500 gen_helper_sckc(cpu_env, o->in2);
/* STORE CLOCK COMPARATOR: privileged. */
3504 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3506 check_privileged(s);
3507 gen_helper_stckc(o->out, cpu_env);
/* STORE CONTROL (64-bit STCTG): privileged, may fault on the store. */
3511 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3513 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3514 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3515 check_privileged(s);
3516 potential_page_fault(s);
3517 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3518 tcg_temp_free_i32(r1);
3519 tcg_temp_free_i32(r3);
/* STORE CONTROL (32-bit STCTL): privileged, may fault on the store. */
3523 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3525 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3526 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3527 check_privileged(s);
3528 potential_page_fault(s);
3529 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3530 tcg_temp_free_i32(r1);
3531 tcg_temp_free_i32(r3);
/* STORE CPU ID: privileged; pack cpu_num (low) and machine_type (high)
   into one doubleword. */
3535 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3537 TCGv_i64 t1 = tcg_temp_new_i64();
3539 check_privileged(s);
3540 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3541 tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
3542 tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
3543 tcg_temp_free_i64(t1);
/* SET CPU TIMER: privileged. */
3548 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3550 check_privileged(s);
3551 gen_helper_spt(cpu_env, o->in2);
/* STORE FACILITY LIST: privileged; writes a hard-coded facility word to
   real address 200. */
3555 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3558 /* We really ought to have more complete indication of facilities
3559 that we implement. Address this when STFLE is implemented. */
3560 check_privileged(s);
3561 f = tcg_const_i64(0xc0000000);
3562 a = tcg_const_i64(200);
3563 tcg_gen_qemu_st32(f, a, get_mem_index(s));
3564 tcg_temp_free_i64(f);
3565 tcg_temp_free_i64(a);
/* STORE CPU TIMER: privileged. */
3569 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3571 check_privileged(s);
3572 gen_helper_stpt(o->out, cpu_env);
/* STORE SYSTEM INFORMATION: privileged, may fault; helper sets CC. */
3576 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3578 check_privileged(s);
3579 potential_page_fault(s);
3580 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
/* SET PREFIX: privileged. */
3585 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3587 check_privileged(s);
3588 gen_helper_spx(cpu_env, o->in2);
/* Channel-subsystem instructions.  All follow the same shape: privileged,
   may fault, and are implemented entirely in helpers that operate on the
   subchannel identified by regs[1] (plus an operand address where shown). */
3592 static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
3594 check_privileged(s);
3595 potential_page_fault(s);
3596 gen_helper_xsch(cpu_env, regs[1]);
3601 static ExitStatus op_csch(DisasContext *s, DisasOps *o)
3603 check_privileged(s);
3604 potential_page_fault(s);
3605 gen_helper_csch(cpu_env, regs[1]);
3610 static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
3612 check_privileged(s);
3613 potential_page_fault(s);
3614 gen_helper_hsch(cpu_env, regs[1]);
3619 static ExitStatus op_msch(DisasContext *s, DisasOps *o)
3621 check_privileged(s);
3622 potential_page_fault(s);
3623 gen_helper_msch(cpu_env, regs[1], o->in2);
3628 static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
3630 check_privileged(s);
3631 potential_page_fault(s);
3632 gen_helper_rchp(cpu_env, regs[1]);
3637 static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
3639 check_privileged(s);
3640 potential_page_fault(s);
3641 gen_helper_rsch(cpu_env, regs[1]);
3646 static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
3648 check_privileged(s);
3649 potential_page_fault(s);
3650 gen_helper_ssch(cpu_env, regs[1], o->in2);
3655 static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
3657 check_privileged(s);
3658 potential_page_fault(s);
3659 gen_helper_stsch(cpu_env, regs[1], o->in2);
3664 static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
3666 check_privileged(s);
3667 potential_page_fault(s);
3668 gen_helper_tsch(cpu_env, regs[1], o->in2);
3673 static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
3675 check_privileged(s);
3676 potential_page_fault(s);
3677 gen_helper_chsc(cpu_env, o->in2);
/* STORE PREFIX: privileged; read env->psa and mask to the architected
   prefix bits. */
3682 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3684 check_privileged(s);
3685 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3686 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
/* STORE THEN AND/OR SYSTEM MASK (STNSM op 0xac / STOSM): privileged.
   The old mask byte is stored to memory FIRST (see comment), then PSW
   bits 0-7 are ANDed or ORed with the immediate. */
3690 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3692 uint64_t i2 = get_field(s->fields, i2);
3695 check_privileged(s);
3697 /* It is important to do what the instruction name says: STORE THEN.
3698 If we let the output hook perform the store then if we fault and
3699 restart, we'll have the wrong SYSTEM MASK in place. */
3700 t = tcg_temp_new_i64();
3701 tcg_gen_shri_i64(t, psw_mask, 56);
3702 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3703 tcg_temp_free_i64(t);
3705 if (s->fields->op == 0xac) {
3706 tcg_gen_andi_i64(psw_mask, psw_mask,
3707 (i2 << 56) | 0x00ffffffffffffffull);
3709 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
/* STORE USING REAL ADDRESS (32- and 64-bit): privileged, via helpers. */
3714 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3716 check_privileged(s);
3717 potential_page_fault(s);
3718 gen_helper_stura(cpu_env, o->in2, o->in1);
3722 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
3724 check_privileged(s);
3725 potential_page_fault(s);
3726 gen_helper_sturg(cpu_env, o->in2, o->in1);
/* Simple stores of in1 to the address in in2, by width. */
3731 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3733 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3737 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3739 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3743 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3745 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3749 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3751 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
/* STORE ACCESS MULTIPLE: store access registers r1..r3 via helper. */
3755 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3757 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3758 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3759 potential_page_fault(s);
3760 gen_helper_stam(cpu_env, r1, o->in2, r3);
3761 tcg_temp_free_i32(r1);
3762 tcg_temp_free_i32(r3);
/* STORE CHARACTERS UNDER MASK: m3 selects which bytes of r1 to store.
   Contiguous masks (0xf, 0xc, 0x3, single bit, ...) are special-cased as
   one wider store; anything else falls back to a byte-by-byte loop.
   insn->data is the base bit position (distinguishes STCM/STCMH).
   NOTE(review): the switch on m3 and the loop header are elided here. */
3766 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3768 int m3 = get_field(s->fields, m3);
3769 int pos, base = s->insn->data;
3770 TCGv_i64 tmp = tcg_temp_new_i64();
3772 pos = base + ctz32(m3) * 8;
3775 /* Effectively a 32-bit store. */
3776 tcg_gen_shri_i64(tmp, o->in1, pos);
3777 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3783 /* Effectively a 16-bit store. */
3784 tcg_gen_shri_i64(tmp, o->in1, pos);
3785 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3792 /* Effectively an 8-bit store. */
3793 tcg_gen_shri_i64(tmp, o->in1, pos);
3794 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3798 /* This is going to be a sequence of shifts and stores. */
3799 pos = base + 32 - 8;
/* Loop body: store the next selected byte and advance the address;
   m3 is shifted left so the top bit always names the current byte. */
3802 tcg_gen_shri_i64(tmp, o->in1, pos);
3803 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3804 tcg_gen_addi_i64(o->in2, o->in2, 1);
3806 m3 = (m3 << 1) & 0xf;
3811 tcg_temp_free_i64(tmp);
/* STORE MULTIPLE (STM/STMG): store regs r1..r3 at consecutive addresses;
   insn->data is the element size (4 or 8).  NOTE(review): the loop header
   over the register range is elided in this view. */
3815 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3817 int r1 = get_field(s->fields, r1);
3818 int r3 = get_field(s->fields, r3);
3819 int size = s->insn->data;
3820 TCGv_i64 tsize = tcg_const_i64(size);
3824 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3826 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3831 tcg_gen_add_i64(o->in2, o->in2, tsize);
3835 tcg_temp_free_i64(tsize);
/* STORE MULTIPLE HIGH: store the high 32 bits of regs r1..r3.  The value
   is shifted left 32 first because qemu_st32 stores the LOW half of the
   shifted temporary — TODO confirm; the shift direction looks surprising
   but matches upstream. */
3839 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3841 int r1 = get_field(s->fields, r1);
3842 int r3 = get_field(s->fields, r3);
3843 TCGv_i64 t = tcg_temp_new_i64();
3844 TCGv_i64 t4 = tcg_const_i64(4);
3845 TCGv_i64 t32 = tcg_const_i64(32);
3848 tcg_gen_shl_i64(t, regs[r1], t32);
3849 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3853 tcg_gen_add_i64(o->in2, o->in2, t4);
3857 tcg_temp_free_i64(t);
3858 tcg_temp_free_i64(t4);
3859 tcg_temp_free_i64(t32);
/* SEARCH STRING: helper scans for the byte in regs[0]; it returns the
   updated r1 value and the updated r2 comes back via return_low128. */
3863 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3865 potential_page_fault(s);
3866 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3868 return_low128(o->in2);
/* SUBTRACT: out = in1 - in2. */
3872 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3874 tcg_gen_sub_i64(o->out, o->in1, o->in2);
/* SUBTRACT WITH BORROW: out = in1 - in2 - !carry.  The borrow is
   recovered from the current CC via disas_jcc (see comment below), then
   subtracted as a 0/1 value. */
3878 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3883 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3885 /* The !borrow flag is the msb of CC. Since we want the inverse of
3886 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3887 disas_jcc(s, &cmp, 8 | 4);
3888 borrow = tcg_temp_new_i64();
3890 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
3892 TCGv_i32 t = tcg_temp_new_i32();
3893 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
3894 tcg_gen_extu_i32_i64(borrow, t);
3895 tcg_temp_free_i32(t);
3899 tcg_gen_sub_i64(o->out, o->out, borrow);
3900 tcg_temp_free_i64(borrow);
/* SUPERVISOR CALL: record the SVC code and instruction length in env,
   then raise EXCP_SVC; never returns to the TB. */
3904 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3911 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3912 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3913 tcg_temp_free_i32(t);
3915 t = tcg_const_i32(s->next_pc - s->pc);
3916 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3917 tcg_temp_free_i32(t);
3919 gen_exception(EXCP_SVC);
3920 return EXIT_NORETURN;
/* TEST DATA CLASS (short/long/extended BFP): helpers set the CC from the
   value's class vs the mask in in2. */
3923 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3925 gen_helper_tceb(cc_op, o->in1, o->in2);
3930 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3932 gen_helper_tcdb(cc_op, o->in1, o->in2);
3937 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3939 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3944 #ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: may fault; helper sets the CC. */
3945 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3947 potential_page_fault(s);
3948 gen_helper_tprot(cc_op, o->addr1, o->in2);
/* TRANSLATE: translate l1+1 bytes in place using the table at in2. */
3954 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3956 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3957 potential_page_fault(s);
3958 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3959 tcg_temp_free_i32(l);
/* TRANSLATE EXTENDED: helper returns the updated r1 pair; the second
   half comes back via return_low128. */
3964 static ExitStatus op_tre(DisasContext *s, DisasOps *o)
3966 potential_page_fault(s);
3967 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
3968 return_low128(o->out2);
/* TRANSLATE AND TEST: helper sets the CC. */
3973 static ExitStatus op_trt(DisasContext *s, DisasOps *o)
3975 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3976 potential_page_fault(s);
3977 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
3978 tcg_temp_free_i32(l);
/* UNPACK: storage-to-storage decimal unpack via helper. */
3983 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3985 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3986 potential_page_fault(s);
3987 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3988 tcg_temp_free_i32(l);
/* EXCLUSIVE OR (character).  XC with identical operands is the classic
   idiom for clearing memory (x ^ x == 0), so that case is open-coded as
   inline zero stores of descending width; everything else goes through
   the helper.  NOTE(review): the length-countdown structure between the
   store widths is elided in this view. */
3992 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3994 int d1 = get_field(s->fields, d1);
3995 int d2 = get_field(s->fields, d2);
3996 int b1 = get_field(s->fields, b1);
3997 int b2 = get_field(s->fields, b2);
3998 int l = get_field(s->fields, l1);
4001 o->addr1 = get_address(s, 0, b1, d1);
4003 /* If the addresses are identical, this is a store/memset of zero. */
4004 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4005 o->in2 = tcg_const_i64(0);
4009 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4012 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4016 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4019 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4023 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4026 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4030 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4032 gen_op_movi_cc(s, 0);
4036 /* But in general we'll defer to a helper. */
4037 o->in2 = get_address(s, 0, b2, d2);
4038 t32 = tcg_const_i32(l);
4039 potential_page_fault(s);
4040 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4041 tcg_temp_free_i32(t32);
/* EXCLUSIVE OR (register): out = in1 ^ in2. */
4046 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4048 tcg_gen_xor_i64(o->out, o->in1, o->in2);
/* XOR immediate into a sub-field; same data packing as op_ori above. */
4052 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4054 int shift = s->insn->data & 0xff;
4055 int size = s->insn->data >> 8;
4056 uint64_t mask = ((1ull << size) - 1) << shift;
4059 tcg_gen_shli_i64(o->in2, o->in2, shift);
4060 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4062 /* Produce the CC from only the bits manipulated. */
4063 tcg_gen_andi_i64(cc_dst, o->out, mask);
4064 set_cc_nz_u64(s, cc_dst);
/* Constant-zero outputs (used where an insn form discards/zeroes). */
4068 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4070 o->out = tcg_const_i64(0);
4074 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4076 o->out = tcg_const_i64(0);
4082 /* ====================================================================== */
4083 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4084 the original inputs), update the various cc data structures in order to
4085 be able to compute the new condition code. */
/* Each cout_* records a CC_OP plus the operands needed to evaluate the
   condition code lazily later; no CC is computed here. */
4087 static void cout_abs32(DisasContext *s, DisasOps *o)
4089 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4092 static void cout_abs64(DisasContext *s, DisasOps *o)
4094 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4097 static void cout_adds32(DisasContext *s, DisasOps *o)
4099 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4102 static void cout_adds64(DisasContext *s, DisasOps *o)
4104 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4107 static void cout_addu32(DisasContext *s, DisasOps *o)
4109 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4112 static void cout_addu64(DisasContext *s, DisasOps *o)
4114 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4117 static void cout_addc32(DisasContext *s, DisasOps *o)
4119 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4122 static void cout_addc64(DisasContext *s, DisasOps *o)
4124 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4127 static void cout_cmps32(DisasContext *s, DisasOps *o)
4129 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4132 static void cout_cmps64(DisasContext *s, DisasOps *o)
4134 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4137 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4139 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4142 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4144 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4147 static void cout_f32(DisasContext *s, DisasOps *o)
4149 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4152 static void cout_f64(DisasContext *s, DisasOps *o)
4154 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4157 static void cout_f128(DisasContext *s, DisasOps *o)
4159 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4162 static void cout_nabs32(DisasContext *s, DisasOps *o)
4164 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4167 static void cout_nabs64(DisasContext *s, DisasOps *o)
4169 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4172 static void cout_neg32(DisasContext *s, DisasOps *o)
4174 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4177 static void cout_neg64(DisasContext *s, DisasOps *o)
4179 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
/* nz32 masks to the low 32 bits first, since CC_OP_NZ tests all 64. */
4182 static void cout_nz32(DisasContext *s, DisasOps *o)
4184 tcg_gen_ext32u_i64(cc_dst, o->out);
4185 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4188 static void cout_nz64(DisasContext *s, DisasOps *o)
4190 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4193 static void cout_s32(DisasContext *s, DisasOps *o)
4195 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
4198 static void cout_s64(DisasContext *s, DisasOps *o)
4200 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
4203 static void cout_subs32(DisasContext *s, DisasOps *o)
4205 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
4208 static void cout_subs64(DisasContext *s, DisasOps *o)
4210 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
4213 static void cout_subu32(DisasContext *s, DisasOps *o)
4215 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
4218 static void cout_subu64(DisasContext *s, DisasOps *o)
4220 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
4223 static void cout_subb32(DisasContext *s, DisasOps *o)
4225 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
4228 static void cout_subb64(DisasContext *s, DisasOps *o)
4230 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4233 static void cout_tm32(DisasContext *s, DisasOps *o)
4235 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4238 static void cout_tm64(DisasContext *s, DisasOps *o)
4240 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4243 /* ====================================================================== */
4244 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4245 with the TCG register to which we will write. Used in combination with
4246 the "wout" generators, in some cases we need a new temporary, and in
4247 some cases we can write to a TCG global. */
/* Fresh temporary for a single output. */
4249 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4251 o->out = tcg_temp_new_i64();
4253 #define SPEC_prep_new 0
/* Fresh temporary pair (for 128-bit results). */
4255 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4257 o->out = tcg_temp_new_i64();
4258 o->out2 = tcg_temp_new_i64();
4260 #define SPEC_prep_new_P 0
/* Write directly to the global for GPR r1. */
4262 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4264 o->out = regs[get_field(f, r1)];
4267 #define SPEC_prep_r1 0
/* Even/odd GPR pair r1:r1+1; g_out flags mark them as globals so they
   are not freed as temporaries.  SPEC_r1_even enforces even r1. */
4269 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
4271 int r1 = get_field(f, r1);
4273 o->out2 = regs[r1 + 1];
4274 o->g_out = o->g_out2 = true;
4276 #define SPEC_prep_r1_P SPEC_r1_even
/* Write directly to the global for FPR r1. */
4278 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4280 o->out = fregs[get_field(f, r1)];
4283 #define SPEC_prep_f1 0
/* Extended-FP register pair r1:r1+2 (128-bit); SPEC_r1_f128 enforces a
   valid extended-register number. */
4285 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4287 int r1 = get_field(f, r1);
4289 o->out2 = fregs[r1 + 2];
4290 o->g_out = o->g_out2 = true;
4292 #define SPEC_prep_x1 SPEC_r1_f128
4294 /* ====================================================================== */
4295 /* The "Write OUTput" generators. These generally perform some non-trivial
4296 copy of data to TCG globals, or to main memory. The trivial cases are
4297 generally handled by having a "prep" generator install the TCG global
4298 as the destination of the operation. */
/* Whole GPR r1. */
4300 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4302 store_reg(get_field(f, r1), o->out);
4304 #define SPEC_wout_r1 0
/* Low 8 / low 16 bits of r1 only; the rest of the register is kept. */
4306 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4308 int r1 = get_field(f, r1);
4309 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
4311 #define SPEC_wout_r1_8 0
4313 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4315 int r1 = get_field(f, r1);
4316 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
4318 #define SPEC_wout_r1_16 0
/* Low / high 32-bit half of r1. */
4320 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4322 store_reg32_i64(get_field(f, r1), o->out);
4324 #define SPEC_wout_r1_32 0
4326 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
4328 store_reg32h_i64(get_field(f, r1), o->out);
4330 #define SPEC_wout_r1_32h 0
/* 32-bit halves of an even/odd register pair. */
4332 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4334 int r1 = get_field(f, r1);
4335 store_reg32_i64(r1, o->out);
4336 store_reg32_i64(r1 + 1, o->out2);
4338 #define SPEC_wout_r1_P32 SPEC_r1_even
/* A 64-bit value split across an even/odd pair: low word to r1+1,
   high word to r1.  Note this clobbers o->out with the shifted value. */
4340 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4342 int r1 = get_field(f, r1);
4343 store_reg32_i64(r1 + 1, o->out);
4344 tcg_gen_shri_i64(o->out, o->out, 32);
4345 store_reg32_i64(r1, o->out);
4347 #define SPEC_wout_r1_D32 SPEC_r1_even
/* Short / long / extended FP register destinations. */
4349 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4351 store_freg32_i64(get_field(f, r1), o->out);
4353 #define SPEC_wout_e1 0
4355 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4357 store_freg(get_field(f, r1), o->out);
4359 #define SPEC_wout_f1 0
4361 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4363 int f1 = get_field(s->fields, r1);
4364 store_freg(f1, o->out);
4365 store_freg(f1 + 2, o->out2);
4367 #define SPEC_wout_x1 SPEC_r1_f128
/* Conditional writes: skip when r1 == r2 (the insn forms where the
   source and destination registers coincide are no-ops). */
4369 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4371 if (get_field(f, r1) != get_field(f, r2)) {
4372 store_reg32_i64(get_field(f, r1), o->out);
4375 #define SPEC_wout_cond_r1r2_32 0
4377 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4379 if (get_field(f, r1) != get_field(f, r2)) {
4380 store_freg32_i64(get_field(f, r1), o->out);
4383 #define SPEC_wout_cond_e1e2 0
/* Memory destinations via the first-operand address, by width. */
4385 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4387 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4389 #define SPEC_wout_m1_8 0
4391 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4393 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4395 #define SPEC_wout_m1_16 0
4397 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4399 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4401 #define SPEC_wout_m1_32 0
4403 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4405 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4407 #define SPEC_wout_m1_64 0
/* Memory destination via the second-operand address. */
4409 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4411 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4413 #define SPEC_wout_m2_32 0
/* Store-and-load-back forms used by atomic-ish ops: store out to addr1
   and copy the loaded in2 value into r1. */
4415 static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4417 /* XXX release reservation */
4418 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4419 store_reg32_i64(get_field(f, r1), o->in2);
4421 #define SPEC_wout_m2_32_r1_atomic 0
4423 static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4425 /* XXX release reservation */
4426 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s))
4427 store_reg(get_field(f, r1), o->in2);
4429 #define SPEC_wout_m2_64_r1_atomic 0
/* ----------------------------------------------------------------------
 * "in1" operand generators.  Each helper loads the first operand of the
 * instruction being translated into o->in1 (or o->addr1 for the address
 * forms), based on the decoded fields F.  The paired SPEC_in1_* macro
 * names the specification-exception checks translate_one() applies
 * before the helper runs (0 == none).
 *
 * NOTE(review): this region looks like a partial extraction -- the gaps
 * in the embedded line numbering suggest brace lines and some statements
 * were dropped.  Suspect spots are flagged below; verify against the
 * complete source before relying on this text.
 * ---------------------------------------------------------------------- */
4431 /* ====================================================================== */
4432 /* The "INput 1" generators. These load the first operand to an insn. */
/* in1 = a fresh copy of general register r1. */
4434 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4436 o->in1 = load_reg(get_field(f, r1));
4438 #define SPEC_in1_r1 0
/* in1 = the global TCGv for r1 itself (no copy). */
4440 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4442 o->in1 = regs[get_field(f, r1)];
/* NOTE(review): numbering gap (4443-4444) -- presumably "o->g_in1 = true;"
   was here so the shared global TCGv is not freed later; confirm. */
4445 #define SPEC_in1_r1_o 0
/* in1 = r1 sign-/zero-extended from 32 bits, or its high 32 bits. */
4447 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4449 o->in1 = tcg_temp_new_i64();
4450 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
4452 #define SPEC_in1_r1_32s 0
4454 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4456 o->in1 = tcg_temp_new_i64();
4457 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4459 #define SPEC_in1_r1_32u 0
4461 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4463 o->in1 = tcg_temp_new_i64();
4464 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4466 #define SPEC_in1_r1_sr32 0
/* Pair forms: operate on r1+1; SPEC_r1_even demands an even r1. */
4468 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4470 o->in1 = load_reg(get_field(f, r1) + 1);
4472 #define SPEC_in1_r1p1 SPEC_r1_even
4474 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4476 o->in1 = tcg_temp_new_i64();
4477 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4479 #define SPEC_in1_r1p1_32s SPEC_r1_even
4481 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4483 o->in1 = tcg_temp_new_i64();
4484 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4486 #define SPEC_in1_r1p1_32u SPEC_r1_even
/* in1 = 64-bit concatenation of the even/odd pair r1:r1+1. */
4488 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4490 int r1 = get_field(f, r1);
4491 o->in1 = tcg_temp_new_i64();
4492 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4494 #define SPEC_in1_r1_D32 SPEC_r1_even
4496 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4498 o->in1 = load_reg(get_field(f, r2));
4500 #define SPEC_in1_r2 0
4502 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4504 o->in1 = tcg_temp_new_i64();
4505 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
4507 #define SPEC_in1_r2_sr32 0
4509 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4511 o->in1 = load_reg(get_field(f, r3));
4513 #define SPEC_in1_r3 0
4515 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4517 o->in1 = regs[get_field(f, r3)];
/* NOTE(review): numbering gap (4518-4519) -- likely "o->g_in1 = true;". */
4520 #define SPEC_in1_r3_o 0
4522 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4524 o->in1 = tcg_temp_new_i64();
4525 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4527 #define SPEC_in1_r3_32s 0
4529 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4531 o->in1 = tcg_temp_new_i64();
4532 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4534 #define SPEC_in1_r3_32u 0
4536 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4538 int r3 = get_field(f, r3);
4539 o->in1 = tcg_temp_new_i64();
4540 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4542 #define SPEC_in1_r3_D32 SPEC_r3_even
/* Float-register forms. */
4544 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4546 o->in1 = load_freg32_i64(get_field(f, r1));
4548 #define SPEC_in1_e1 0
4550 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4552 o->in1 = fregs[get_field(f, r1)];
/* NOTE(review): numbering gap (4553-4554) -- likely "o->g_in1 = true;". */
4555 #define SPEC_in1_f1_o 0
/* 128-bit float pair r1/r1+2 aliased into out/out2 as globals. */
4557 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4559 int r1 = get_field(f, r1);
/* NOTE(review): numbering gap (4560) -- likely "o->out = fregs[r1];". */
4561 o->out2 = fregs[r1 + 2];
4562 o->g_out = o->g_out2 = true;
4564 #define SPEC_in1_x1_o SPEC_r1_f128
4566 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4568 o->in1 = fregs[get_field(f, r3)];
/* NOTE(review): numbering gap (4569-4570) -- likely "o->g_in1 = true;". */
4571 #define SPEC_in1_f3_o 0
/* addr1 = effective address from b1/d1 (no index register). */
4573 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4575 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4577 #define SPEC_in1_la1 0
/* addr1 = effective address from x2/b2/d2. */
4579 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4581 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4582 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4584 #define SPEC_in1_la2 0
/* The in1_m1_* helpers load in1 from memory at the la1 address with the
   named width/extension.
   NOTE(review): each shows a numbering gap where the "in1_la1(s, f, o);"
   call that computes o->addr1 presumably sat; confirm against the full
   source. */
4586 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4589 o->in1 = tcg_temp_new_i64();
4590 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4592 #define SPEC_in1_m1_8u 0
4594 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4597 o->in1 = tcg_temp_new_i64();
4598 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4600 #define SPEC_in1_m1_16s 0
4602 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4605 o->in1 = tcg_temp_new_i64();
4606 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4608 #define SPEC_in1_m1_16u 0
4610 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4613 o->in1 = tcg_temp_new_i64();
4614 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4616 #define SPEC_in1_m1_32s 0
4618 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4621 o->in1 = tcg_temp_new_i64();
4622 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4624 #define SPEC_in1_m1_32u 0
4626 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4629 o->in1 = tcg_temp_new_i64();
4630 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4632 #define SPEC_in1_m1_64 0
/* ----------------------------------------------------------------------
 * "in2" operand generators: load the second operand into o->in2, with
 * the same conventions and SPEC_* pairing as the in1 helpers above.
 * NOTE(review): numbering gaps below suggest dropped lines (braces plus
 * a few statements); suspect spots are flagged inline.
 * ---------------------------------------------------------------------- */
4634 /* ====================================================================== */
4635 /* The "INput 2" generators. These load the second operand to an insn. */
4637 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4639 o->in2 = regs[get_field(f, r1)];
/* NOTE(review): numbering gap (4640-4641) -- likely "o->g_in2 = true;". */
4642 #define SPEC_in2_r1_o 0
4644 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4646 o->in2 = tcg_temp_new_i64();
4647 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4649 #define SPEC_in2_r1_16u 0
4651 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4653 o->in2 = tcg_temp_new_i64();
4654 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4656 #define SPEC_in2_r1_32u 0
/* in2 = 64-bit concatenation of the even/odd pair r1:r1+1. */
4658 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4660 int r1 = get_field(f, r1);
4661 o->in2 = tcg_temp_new_i64();
4662 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4664 #define SPEC_in2_r1_D32 SPEC_r1_even
4666 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4668 o->in2 = load_reg(get_field(f, r2));
4670 #define SPEC_in2_r2 0
4672 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4674 o->in2 = regs[get_field(f, r2)];
/* NOTE(review): numbering gap (4675-4676) -- likely "o->g_in2 = true;". */
4677 #define SPEC_in2_r2_o 0
/* in2 = r2, with special handling when the field is zero. */
4679 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4681 int r2 = get_field(f, r2);
/* NOTE(review): numbering gap (4682) -- a guard such as "if (r2 != 0) {"
   appears to have been dropped; as shown, the load looks unconditional,
   which would contradict the "_nz" name.  Verify. */
4683 o->in2 = load_reg(r2);
4686 #define SPEC_in2_r2_nz 0
/* Sign-/zero-extended sub-word views of r2. */
4688 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4690 o->in2 = tcg_temp_new_i64();
4691 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4693 #define SPEC_in2_r2_8s 0
4695 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4697 o->in2 = tcg_temp_new_i64();
4698 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4700 #define SPEC_in2_r2_8u 0
4702 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4704 o->in2 = tcg_temp_new_i64();
4705 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4707 #define SPEC_in2_r2_16s 0
4709 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4711 o->in2 = tcg_temp_new_i64();
4712 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4714 #define SPEC_in2_r2_16u 0
4716 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4718 o->in2 = load_reg(get_field(f, r3));
4720 #define SPEC_in2_r3 0
4722 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4724 o->in2 = tcg_temp_new_i64();
4725 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
4727 #define SPEC_in2_r3_sr32 0
4729 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4731 o->in2 = tcg_temp_new_i64();
4732 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4734 #define SPEC_in2_r2_32s 0
4736 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4738 o->in2 = tcg_temp_new_i64();
4739 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4741 #define SPEC_in2_r2_32u 0
4743 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4745 o->in2 = tcg_temp_new_i64();
4746 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
4748 #define SPEC_in2_r2_sr32 0
/* in2 = low half of float register r2, widened to i64. */
4750 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4752 o->in2 = load_freg32_i64(get_field(f, r2));
4754 #define SPEC_in2_e2 0
/* in2 = the global TCGv for float register r2 (no copy). */
4756 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4758 o->in2 = fregs[get_field(f, r2)];
/* NOTE(review): numbering gap (4759-4760) -- likely "o->g_in2 = true;". */
4761 #define SPEC_in2_f2_o 0
/* 128-bit float pair r2/r2+2 aliased into in1/in2 as globals. */
4763 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4765 int r2 = get_field(f, r2);
/* NOTE(review): numbering gap (4766) -- likely "o->in1 = fregs[r2];". */
4767 o->in2 = fregs[r2 + 2];
4768 o->g_in1 = o->g_in2 = true;
4770 #define SPEC_in2_x2_o SPEC_r2_f128
/* in2 = address formed from register r2 alone (no base/displacement). */
4772 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4774 o->in2 = get_address(s, 0, get_field(f, r2), 0);
4776 #define SPEC_in2_ra2 0
/* in2 = effective address x2+b2+d2. */
4778 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4780 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4781 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4783 #define SPEC_in2_a2 0
/* in2 = PC-relative address: current PC plus halfword-scaled I2. */
4785 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4787 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4789 #define SPEC_in2_ri2 0
/* in2 = shift count, masked to 31 or 63 bits by help_l2_shift(). */
4791 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4793 help_l2_shift(s, f, o, 31);
4795 #define SPEC_in2_sh32 0
4797 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4799 help_l2_shift(s, f, o, 63);
4801 #define SPEC_in2_sh64 0
/* The in2_m2_* / in2_mri2_* helpers load in2 from memory; the address
   comes from the a2 (base+index+disp) or ri2 (PC-relative) generators
   and is loaded over in place (in2 holds the address, then the value).
   NOTE(review): each function shows a numbering gap where the address
   generator call ("in2_a2(s, f, o);" or "in2_ri2(s, f, o);") presumably
   sat; confirm against the full source. */
4803 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4806 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4808 #define SPEC_in2_m2_8u 0
4810 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4813 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4815 #define SPEC_in2_m2_16s 0
4817 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4820 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4822 #define SPEC_in2_m2_16u 0
4824 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4827 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4829 #define SPEC_in2_m2_32s 0
4831 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4834 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4836 #define SPEC_in2_m2_32u 0
4838 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4841 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4843 #define SPEC_in2_m2_64 0
4845 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4848 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4850 #define SPEC_in2_mri2_16u 0
4852 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4855 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4857 #define SPEC_in2_mri2_32s 0
4859 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4862 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4864 #define SPEC_in2_mri2_32u 0
4866 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4869 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4871 #define SPEC_in2_mri2_64 0
/* Interlocked-update loads: compute the la1 address into addr1, then
   load from it into a fresh temp.
   NOTE(review): numbering gaps (4876 and 4885) where a call such as
   "in1_la1(s, f, o);" presumably sat. */
4873 static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4875 /* XXX should reserve the address */
4877 o->in2 = tcg_temp_new_i64();
4878 tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
4880 #define SPEC_in2_m2_32s_atomic 0
4882 static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4884 /* XXX should reserve the address */
4886 o->in2 = tcg_temp_new_i64();
4887 tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
4889 #define SPEC_in2_m2_64_atomic 0
4891 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4893 o->in2 = tcg_const_i64(get_field(f, i2));
4895 #define SPEC_in2_i2 0
4897 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4899 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4901 #define SPEC_in2_i2_8u 0
4903 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4905 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4907 #define SPEC_in2_i2_16u 0
4909 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4911 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4913 #define SPEC_in2_i2_32u 0
4915 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4917 uint64_t i2 = (uint16_t)get_field(f, i2);
4918 o->in2 = tcg_const_i64(i2 << s->insn->data);
4920 #define SPEC_in2_i2_16u_shl 0
4922 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4924 uint64_t i2 = (uint32_t)get_field(f, i2);
4925 o->in2 = tcg_const_i64(i2 << s->insn->data);
4927 #define SPEC_in2_i2_32u_shl 0
/* System-mode only: expose the raw instruction bits as an i64 constant
   operand.
   NOTE(review): the matching #endif for this #ifndef appears just past
   this excerpt (numbering gap after 4934); verify. */
4929 #ifndef CONFIG_USER_ONLY
4930 static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
4932 o->in2 = tcg_const_i64(s->fields->raw_insn);
4934 #define SPEC_in2_insn 0
4937 /* ====================================================================== */
/* Build the instruction table.  insn-data.def is expanded three times:
   once to produce enum labels, once to fill the DisasInsn info array,
   and once (inside lookup_opc) to generate a switch over opcodes.
   NOTE(review): numbering gaps here (e.g. 4951-4953, 4955-4957,
   4966-4976, 4984-4986, 4989, 4991-4992) -- the enum terminator, #undef
   lines, the leading .opc/.fmt/.fac initializers, the null-helper
   fallback defines and the lookup_opc switch skeleton appear to have
   been dropped from this excerpt. */
4939 /* Find opc within the table of insns. This is formulated as a switch
4940 statement so that (1) we get compile-time notice of cut-paste errors
4941 for duplicated opcodes, and (2) the compiler generates the binary
4942 search tree, rather than us having to post-process the table. */
4944 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4945 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4947 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4949 enum DisasInsnEnum {
4950 #include "insn-data.def"
4954 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4958 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
4960 .help_in1 = in1_##I1, \
4961 .help_in2 = in2_##I2, \
4962 .help_prep = prep_##P, \
4963 .help_wout = wout_##W, \
4964 .help_cout = cout_##CC, \
4965 .help_op = op_##OP, \
4969 /* Allow 0 to be used for NULL in the table below. */
4977 #define SPEC_in1_0 0
4978 #define SPEC_in2_0 0
4979 #define SPEC_prep_0 0
4980 #define SPEC_wout_0 0
4982 static const DisasInsn insn_info[] = {
4983 #include "insn-data.def"
/* lookup_opc: binary-search dispatch on (op << 8 | op2), generated by
   the compiler from the switch over all defined opcodes. */
4987 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4988 case OPC: return &insn_info[insn_ ## NM];
4990 static const DisasInsn *lookup_opc(uint16_t opc)
4993 #include "insn-data.def"
5002 /* Extract a field from the insn. The INSN should be left-aligned in
5003 the uint64_t so that we can more easily utilize the big-bit-endian
5004 definitions we extract from the Principles of Operation. */
5006 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
/* NOTE(review): numbering gap (5007-5013) -- the local declarations
   (extracted value and sign mask) and possibly an early-out appear to
   have been dropped from this excerpt. */
5014 /* Zero extract the field from the insn. */
5015 r = (insn << f->beg) >> (64 - f->size);
5017 /* Sign-extend, or un-swap the field as necessary. */
/* f->type selects the post-processing applied to the raw field bits;
   the switch header and break statements are elided in this excerpt. */
5019 case 0: /* unsigned */
5021 case 1: /* signed */
5022 assert(f->size <= 32);
5023 m = 1u << (f->size - 1);
/* NOTE(review): gap (5024-5025) -- presumably the sign-extension step
   using mask m, e.g. "r = (r ^ m) - m;".  Verify. */
5026 case 2: /* dl+dh split, signed 20 bit. */
5027 r = ((int8_t)r << 12) | (r >> 8);
/* Gap (5028-5032): break/default handling elided. */
5033 /* Validate that the "compressed" encoding we selected above is valid.
5034 I.e. we haven't made two different original fields overlap. */
5035 assert(((o->presentC >> f->indexC) & 1) == 0);
5036 o->presentC |= 1 << f->indexC;
5037 o->presentO |= 1 << f->indexO;
5039 o->c[f->indexC] = r;
5042 /* Lookup the insn at the current PC, extracting the operands into O and
5043 returning the info struct for the insn. Returns NULL for invalid insn. */
5045 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
/* NOTE(review): the parameter list continues on an elided line
   (presumably the DisasFields pointer F), and several declarations and
   closing braces are missing throughout this function -- the numbering
   gaps mark the spots. */
5048 uint64_t insn, pc = s->pc;
5050 const DisasInsn *info;
/* First halfword: the major opcode in bits 0-7 determines the
   instruction length (2, 4, or 6 bytes). */
5052 insn = ld_code2(env, pc);
5053 op = (insn >> 8) & 0xff;
5054 ilen = get_ilen(op);
5055 s->next_pc = s->pc + ilen;
/* Load the remaining bytes, left-aligning the insn in the uint64_t
   (the switch on ilen is elided in this excerpt). */
5062 insn = ld_code4(env, pc) << 32;
5065 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
5071 /* We can't actually determine the insn format until we've looked up
5072 the full insn opcode. Which we can't do without locating the
5073 secondary opcode. Assume by default that OP2 is at bit 40; for
5074 those smaller insns that don't actually have a secondary opcode
5075 this will correctly result in OP2 = 0. */
5081 case 0xb2: /* S, RRF, RRE */
5082 case 0xb3: /* RRE, RRD, RRF */
5083 case 0xb9: /* RRE, RRF */
5084 case 0xe5: /* SSE, SIL */
/* Secondary opcode in bits 8-15 of the left-aligned insn. */
5085 op2 = (insn << 8) >> 56;
5089 case 0xc0: /* RIL */
5090 case 0xc2: /* RIL */
5091 case 0xc4: /* RIL */
5092 case 0xc6: /* RIL */
5093 case 0xc8: /* SSF */
5094 case 0xcc: /* RIL */
/* Secondary opcode is the 4-bit field in bits 12-15. */
5095 op2 = (insn << 12) >> 60;
/* SS formats: no secondary opcode.
   NOTE(review): additional SS case labels and the "op2 = 0;" assignment
   appear elided (gaps 5098-5102, 5105-5107). */
5097 case 0xd0 ... 0xdf: /* SS */
5103 case 0xee ... 0xf3: /* SS */
5104 case 0xf8 ... 0xfd: /* SS */
/* Default branch: secondary opcode at bit 40 (bits 40-47). */
5108 op2 = (insn << 40) >> 56;
5112 memset(f, 0, sizeof(*f));
5117 /* Lookup the instruction. */
5118 info = lookup_opc(op << 8 | op2);
5120 /* If we found it, extract the operands. */
5122 DisasFormat fmt = info->fmt;
5125 for (i = 0; i < NUM_C_FIELD; ++i) {
5126 extract_field(f, &format_info[fmt].op[i], insn);
5132 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
/* Translate exactly one instruction: decode it, check its specification
   constraints, run its in1/in2/prep/op/wout/cout helper pipeline, then
   free the per-insn temporaries.  Returns the exit status that drives
   the translation-block loop.
   NOTE(review): several lines are elided in this excerpt (local
   declarations around 5136-5138, some if-conditions and closing braces);
   the numbering gaps mark the spots. */
5134 const DisasInsn *insn;
5135 ExitStatus ret = NO_EXIT;
5139 /* Search for the insn in the table. */
5140 insn = extract_insn(env, s, &f);
5142 /* Not found means unimplemented/illegal opcode. */
5144 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
5146 gen_illegal_opcode(s);
5147 return EXIT_NORETURN;
5150 /* Check for insn specification exceptions. */
/* Each SPEC_* bit demands a register-field property (even pair number,
   valid 128-bit float pair); a violated check raises PGM_SPECIFICATION.
   The comparison lines for each check are elided in this excerpt. */
5152 int spec = insn->spec, excp = 0, r;
5154 if (spec & SPEC_r1_even) {
5155 r = get_field(&f, r1);
5157 excp = PGM_SPECIFICATION;
5160 if (spec & SPEC_r2_even) {
5161 r = get_field(&f, r2);
5163 excp = PGM_SPECIFICATION;
5166 if (spec & SPEC_r3_even) {
5167 r = get_field(&f, r3);
5169 excp = PGM_SPECIFICATION;
5172 if (spec & SPEC_r1_f128) {
5173 r = get_field(&f, r1);
5175 excp = PGM_SPECIFICATION;
5178 if (spec & SPEC_r2_f128) {
5179 r = get_field(&f, r2);
5181 excp = PGM_SPECIFICATION;
5185 gen_program_exception(s, excp);
5186 return EXIT_NORETURN;
5190 /* Set up the structures we use to communicate with the helpers. */
5193 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
5194 TCGV_UNUSED_I64(o.out);
5195 TCGV_UNUSED_I64(o.out2);
5196 TCGV_UNUSED_I64(o.in1);
5197 TCGV_UNUSED_I64(o.in2);
5198 TCGV_UNUSED_I64(o.addr1);
5200 /* Implement the instruction. */
/* Fixed pipeline: in1 -> in2 -> prep -> op -> wout -> cout; any stage
   may be absent (NULL) for a given insn. */
5201 if (insn->help_in1) {
5202 insn->help_in1(s, &f, &o);
5204 if (insn->help_in2) {
5205 insn->help_in2(s, &f, &o);
5207 if (insn->help_prep) {
5208 insn->help_prep(s, &f, &o);
5210 if (insn->help_op) {
5211 ret = insn->help_op(s, &o);
5213 if (insn->help_wout) {
5214 insn->help_wout(s, &f, &o);
5216 if (insn->help_cout) {
5217 insn->help_cout(s, &o);
5220 /* Free any temporaries created by the helpers. */
/* The g_* flags mark values aliasing globals (regs[]/fregs[]) that must
   not be freed here. */
5221 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
5222 tcg_temp_free_i64(o.out);
5224 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
5225 tcg_temp_free_i64(o.out2);
5227 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
5228 tcg_temp_free_i64(o.in1);
5230 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
5231 tcg_temp_free_i64(o.in2);
5233 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
5234 tcg_temp_free_i64(o.addr1);
5237 /* Advance to the next instruction. */
5242 static inline void gen_intermediate_code_internal(S390CPU *cpu,
5243 TranslationBlock *tb,
/* Per-TB translation loop: decode instructions starting at the TB's
   start PC until a page boundary, op-buffer exhaustion, max_insns,
   single-step, or an explicit exit status from translate_one().
   NOTE(review): this excerpt elides many lines (the third parameter --
   presumably a search_pc flag -- several declarations, loop headers and
   the final switch on the exit status); numbering gaps mark them. */
5246 CPUState *cs = CPU(cpu);
5247 CPUS390XState *env = &cpu->env;
5249 target_ulong pc_start;
5250 uint64_t next_page_start;
5252 int num_insns, max_insns;
/* In 24/31-bit addressing modes, truncate the start PC to 31 bits. */
5260 if (!(tb->flags & FLAG_MASK_64)) {
5261 pc_start &= 0x7fffffff;
5266 dc.cc_op = CC_OP_DYNAMIC;
5267 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
5269 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
5272 max_insns = tb->cflags & CF_COUNT_MASK;
5273 if (max_insns == 0) {
5274 max_insns = CF_COUNT_MASK;
/* Bookkeeping for state reconstruction: record pc/cc_op/icount per
   generated op so an exception can restore the guest state. */
5281 j = tcg_op_buf_count();
5285 tcg_ctx.gen_opc_instr_start[lj++] = 0;
5288 tcg_ctx.gen_opc_pc[lj] = dc.pc;
5289 gen_opc_cc_op[lj] = dc.cc_op;
5290 tcg_ctx.gen_opc_instr_start[lj] = 1;
5291 tcg_ctx.gen_opc_icount[lj] = num_insns;
5293 if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5297 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5298 tcg_gen_debug_insn_start(dc.pc);
/* Stop at breakpoints so the debug machinery sees the exact PC. */
5302 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
5303 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
5304 if (bp->pc == dc.pc) {
5305 status = EXIT_PC_STALE;
5311 if (status == NO_EXIT) {
5312 status = translate_one(env, &dc);
5315 /* If we reach a page boundary, are single stepping,
5316 or exhaust instruction count, stop generation. */
5317 if (status == NO_EXIT
5318 && (dc.pc >= next_page_start
5319 || tcg_op_buf_full()
5320 || num_insns >= max_insns
5322 || cs->singlestep_enabled)) {
5323 status = EXIT_PC_STALE;
5325 } while (status == NO_EXIT);
5327 if (tb->cflags & CF_LAST_IO) {
/* Final fixups keyed on the loop's exit status (switch elided). */
5336 update_psw_addr(&dc);
5338 case EXIT_PC_UPDATED:
5339 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5340 cc op type is in env */
5342 /* Exit the TB, either by raising a debug exception or by return. */
5344 gen_exception(EXCP_DEBUG);
5353 gen_tb_end(tb, num_insns);
5356 j = tcg_op_buf_count();
5359 tcg_ctx.gen_opc_instr_start[lj++] = 0;
5362 tb->size = dc.pc - pc_start;
5363 tb->icount = num_insns;
5366 #if defined(S390X_DEBUG_DISAS)
5367 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
5368 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5369 log_target_disas(env, pc_start, dc.pc - pc_start, 1);
5375 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
5377 gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
5380 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
5382 gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
5385 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
5388 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
5389 cc_op = gen_opc_cc_op[pc_pos];
5390 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {