4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
52 struct TranslationBlock *tb;
53 const DisasInsn *insn;
57 bool singlestep_enabled;
61 /* Information carried about a condition to be evaluated. */
68 struct { TCGv_i64 a, b; } s64;
69 struct { TCGv_i32 a, b; } s32;
75 static void gen_op_calc_cc(DisasContext *s);
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit[CC_OP_MAX];
79 static uint64_t inline_branch_miss[CC_OP_MAX];
/* Log the raw instruction word when verbose disassembly is enabled. */
static inline void debug_insn(uint64_t insn)
{
    LOG_DISAS("insn: 0x%" PRIx64 "\n", insn);
}
87 static inline uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
89 if (!(s->tb->flags & FLAG_MASK_64)) {
90 if (s->tb->flags & FLAG_MASK_32) {
91 return pc | 0x80000000;
/* Dump architected CPU state (PSW, GPRs, FPRs, control registers and
   optional branch statistics) to stream F.
   NOTE(review): braces, else branches and blank lines are elided in
   this excerpt. */
void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
    /* cc_op > 3 means the CC is still a deferred computation: print its
       symbolic name rather than a value. */
    if (env->cc_op > 3) {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
        /* Otherwise cc_op holds the literal condition-code value. */
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
                    env->psw.mask, env->psw.addr, env->cc_op);
    /* General registers. */
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
            cpu_fprintf(f, "\n");
    /* Floating point registers (raw 64-bit image). */
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
            cpu_fprintf(f, "\n");
#ifndef CONFIG_USER_ONLY
    /* Control registers exist only in system emulation. */
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
            cpu_fprintf(f, "\n");
#ifdef DEBUG_INLINE_BRANCHES
    /* Per-cc_op inline-branch hit/miss statistics. */
    for (i = 0; i < CC_OP_MAX; i++) {
        cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
                    inline_branch_miss[i], inline_branch_hit[i]);
    cpu_fprintf(f, "\n");
/* TCG globals backed by CPUS390XState fields; created once in
   s390x_translate_init(). */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
/* Condition-code machinery: cc_op selects the deferred computation,
   cc_src/cc_dst/cc_vr are its operands (consumed by gen_op_calc_cc). */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;
/* Names "r0".."r15" and "f0".."f15" plus handles for the register files. */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];
/* Per-opcode cc_op snapshot; presumably parallel to the gen_opc_* arrays
   used for state restore -- TODO confirm against restore_state code. */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
/* Allocate the TCG globals declared above (env pointer, PSW, CC state,
   general and floating-point registers).
   NOTE(review): braces, the loop index declaration and trailing name
   arguments are elided in this excerpt. */
void s390x_translate_init(void)
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.addr),
    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUS390XState, psw.mask),
    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
    /* General registers r0..r15, names in the first half of the buffer. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUS390XState, regs[i]),
    /* FP registers f0..f15, names in the second half of the buffer. */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUS390XState, fregs[i].d),
                                      cpu_reg_names[i + 16]);
203 static inline TCGv_i64 load_reg(int reg)
205 TCGv_i64 r = tcg_temp_new_i64();
206 tcg_gen_mov_i64(r, regs[reg]);
210 static inline TCGv_i64 load_freg(int reg)
212 TCGv_i64 r = tcg_temp_new_i64();
213 tcg_gen_mov_i64(r, fregs[reg]);
/* Return a fresh 32-bit temp holding the short-FP value of register REG,
   which lives in the high half of the 64-bit freg.
   NOTE(review): #else/#endif and the return are elided in this excerpt. */
static inline TCGv_i32 load_freg32(int reg)
{
    TCGv_i32 r = tcg_temp_new_i32();
#if HOST_LONG_BITS == 32
    /* 32-bit host: the high half of an i64 global is directly accessible. */
    tcg_gen_mov_i32(r, TCGV_HIGH(fregs[reg]));
    /* 64-bit host branch: reinterprets the i32 temp as i64 to shift in
       place -- relies on i32/i64 temps sharing index space; confirm
       against the TCG version in use. */
    tcg_gen_shri_i64(MAKE_TCGV_I64(GET_TCGV_I32(r)), fregs[reg], 32);
228 static inline TCGv_i64 load_freg32_i64(int reg)
230 TCGv_i64 r = tcg_temp_new_i64();
231 tcg_gen_shri_i64(r, fregs[reg], 32);
235 static inline TCGv_i32 load_reg32(int reg)
237 TCGv_i32 r = tcg_temp_new_i32();
238 tcg_gen_trunc_i64_i32(r, regs[reg]);
242 static inline TCGv_i64 load_reg32_i64(int reg)
244 TCGv_i64 r = tcg_temp_new_i64();
245 tcg_gen_ext32s_i64(r, regs[reg]);
249 static inline void store_reg(int reg, TCGv_i64 v)
251 tcg_gen_mov_i64(regs[reg], v);
254 static inline void store_freg(int reg, TCGv_i64 v)
256 tcg_gen_mov_i64(fregs[reg], v);
/* Store 32-bit V into the low half of register REG, preserving the
   high half.  NOTE(review): #else/#endif are elided in this excerpt. */
static inline void store_reg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the upper half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_LOW(regs[reg]), v);
    /* 64-bit host branch: reinterpret the i32 temp as i64 for the
       deposit -- relies on TCG's shared temp index space; confirm. */
    tcg_gen_deposit_i64(regs[reg], regs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 0, 32);
270 static inline void store_reg32_i64(int reg, TCGv_i64 v)
272 /* 32 bit register writes keep the upper half */
273 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
276 static inline void store_reg32h_i64(int reg, TCGv_i64 v)
278 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
/* Store 32-bit V as a short-FP value into the HIGH half of FP register
   REG, keeping the low half.
   NOTE(review): #else/#endif are elided in this excerpt. */
static inline void store_freg32(int reg, TCGv_i32 v)
{
    /* 32 bit register writes keep the lower half */
#if HOST_LONG_BITS == 32
    tcg_gen_mov_i32(TCGV_HIGH(fregs[reg]), v);
    /* 64-bit host branch: reinterpret the i32 temp as i64 for the
       deposit -- relies on TCG's shared temp index space; confirm. */
    tcg_gen_deposit_i64(fregs[reg], fregs[reg],
                        MAKE_TCGV_I64(GET_TCGV_I32(v)), 32, 32);
292 static inline void store_freg32_i64(int reg, TCGv_i64 v)
294 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
297 static inline void return_low128(TCGv_i64 dest)
299 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
302 static inline void update_psw_addr(DisasContext *s)
305 tcg_gen_movi_i64(psw_addr, s->pc);
/* Called before memory accesses that may fault: in system emulation the
   PSW/cc state must be made consistent so the fault handler can restore
   it.  NOTE(review): the body under !CONFIG_USER_ONLY is elided in this
   excerpt -- confirm what state it syncs. */
static inline void potential_page_fault(DisasContext *s)
{
#ifndef CONFIG_USER_ONLY
316 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
318 return (uint64_t)cpu_lduw_code(env, pc);
321 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
323 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
326 static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
328 return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
/* Map the PSW address-space-control bits (from tb->flags) to a QEMU MMU
   index.  NOTE(review): the case bodies, default branch and return
   values are elided in this excerpt. */
static inline int get_mem_index(DisasContext *s)
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
    case PSW_ASC_SECONDARY >> 32:
    case PSW_ASC_HOME >> 32:
346 static void gen_exception(int excp)
348 TCGv_i32 tmp = tcg_const_i32(excp);
349 gen_helper_exception(cpu_env, tmp);
350 tcg_temp_free_i32(tmp);
/* Raise a program exception with code CODE: record the code and the
   instruction length in env, then trigger EXCP_PGM and end the TB.
   NOTE(review): the tmp declaration, the PSW-advance statements and
   braces are elided in this excerpt. */
static void gen_program_exception(DisasContext *s, int code)
    /* Remember what pgm exeption this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);
    /* ilen = byte length of the current instruction. */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);
    /* Advance past instruction. */
    /* Trigger exception. */
    gen_exception(EXCP_PGM);
    /* The exception helper does not return: end the TB. */
    s->is_jmp = DISAS_EXCP;
380 static inline void gen_illegal_opcode(DisasContext *s)
382 gen_program_exception(s, PGM_SPECIFICATION);
385 static inline void check_privileged(DisasContext *s)
387 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
388 gen_program_exception(s, PGM_PRIVILEGED);
/* Compute the effective address base(b2) + index(x2) + displacement D2
   into a new temp, masking to 31 bits outside 64-bit mode.
   NOTE(review): the tmp declaration, if/else structure, return and
   braces are elided in this excerpt. */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
    /* 31-bitify the immediate part; register contents are dealt with below */
    if (!(s->tb->flags & FLAG_MASK_64)) {
    /* Index and base both present. */
        tmp = tcg_const_i64(d2);
        tcg_gen_add_i64(tmp, tmp, regs[x2]);
        tcg_gen_add_i64(tmp, tmp, regs[b2]);
    /* Base register only. */
        tmp = tcg_const_i64(d2);
        tcg_gen_add_i64(tmp, tmp, regs[b2]);
    /* Displacement only. */
        tmp = tcg_const_i64(d2);
    /* 31-bit mode mask if there are values loaded from registers */
    if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
430 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
432 s->cc_op = CC_OP_CONST0 + val;
435 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
437 tcg_gen_discard_i64(cc_src);
438 tcg_gen_mov_i64(cc_dst, dst);
439 tcg_gen_discard_i64(cc_vr);
443 static void gen_op_update1_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 dst)
445 tcg_gen_discard_i64(cc_src);
446 tcg_gen_extu_i32_i64(cc_dst, dst);
447 tcg_gen_discard_i64(cc_vr);
451 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
454 tcg_gen_mov_i64(cc_src, src);
455 tcg_gen_mov_i64(cc_dst, dst);
456 tcg_gen_discard_i64(cc_vr);
460 static void gen_op_update2_cc_i32(DisasContext *s, enum cc_op op, TCGv_i32 src,
463 tcg_gen_extu_i32_i64(cc_src, src);
464 tcg_gen_extu_i32_i64(cc_dst, dst);
465 tcg_gen_discard_i64(cc_vr);
469 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
470 TCGv_i64 dst, TCGv_i64 vr)
472 tcg_gen_mov_i64(cc_src, src);
473 tcg_gen_mov_i64(cc_dst, dst);
474 tcg_gen_mov_i64(cc_vr, vr);
478 static inline void set_cc_nz_u32(DisasContext *s, TCGv_i32 val)
480 gen_op_update1_cc_i32(s, CC_OP_NZ, val);
483 static inline void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
485 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
488 static inline void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
490 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
493 static inline void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
495 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
498 static inline void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
500 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
503 static inline void cmp_32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2,
506 gen_op_update2_cc_i32(s, cond, v1, v2);
509 static inline void cmp_64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2,
512 gen_op_update2_cc_i64(s, cond, v1, v2);
515 static inline void cmp_s32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
517 cmp_32(s, v1, v2, CC_OP_LTGT_32);
520 static inline void cmp_u32(DisasContext *s, TCGv_i32 v1, TCGv_i32 v2)
522 cmp_32(s, v1, v2, CC_OP_LTUGTU_32);
525 static inline void cmp_s32c(DisasContext *s, TCGv_i32 v1, int32_t v2)
527 /* XXX optimize for the constant? put it in s? */
528 TCGv_i32 tmp = tcg_const_i32(v2);
529 cmp_32(s, v1, tmp, CC_OP_LTGT_32);
530 tcg_temp_free_i32(tmp);
533 static inline void cmp_u32c(DisasContext *s, TCGv_i32 v1, uint32_t v2)
535 TCGv_i32 tmp = tcg_const_i32(v2);
536 cmp_32(s, v1, tmp, CC_OP_LTUGTU_32);
537 tcg_temp_free_i32(tmp);
540 static inline void cmp_s64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
542 cmp_64(s, v1, v2, CC_OP_LTGT_64);
545 static inline void cmp_u64(DisasContext *s, TCGv_i64 v1, TCGv_i64 v2)
547 cmp_64(s, v1, v2, CC_OP_LTUGTU_64);
550 static inline void cmp_s64c(DisasContext *s, TCGv_i64 v1, int64_t v2)
552 TCGv_i64 tmp = tcg_const_i64(v2);
554 tcg_temp_free_i64(tmp);
557 static inline void cmp_u64c(DisasContext *s, TCGv_i64 v1, uint64_t v2)
559 TCGv_i64 tmp = tcg_const_i64(v2);
561 tcg_temp_free_i64(tmp);
564 static inline void set_cc_s32(DisasContext *s, TCGv_i32 val)
566 gen_op_update1_cc_i32(s, CC_OP_LTGT0_32, val);
569 static inline void set_cc_s64(DisasContext *s, TCGv_i64 val)
571 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, val);
574 /* CC value is in env->cc_op */
575 static inline void set_cc_static(DisasContext *s)
577 tcg_gen_discard_i64(cc_src);
578 tcg_gen_discard_i64(cc_dst);
579 tcg_gen_discard_i64(cc_vr);
580 s->cc_op = CC_OP_STATIC;
583 static inline void gen_op_set_cc_op(DisasContext *s)
585 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
586 tcg_gen_movi_i32(cc_op, s->cc_op);
590 static inline void gen_update_cc_op(DisasContext *s)
/* calculates cc into cc_op */
/* Materialize the deferred condition code: dispatch on s->cc_op and call
   the calc_cc helper with the live subset of cc_src/cc_dst/cc_vr.
   NOTE(review): the switch statement, case labels, breaks and the final
   s->cc_op update are elided in this excerpt. */
static void gen_op_calc_cc(DisasContext *s)
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
    TCGv_i64 dummy = tcg_const_i64(0);
    /* Constant CC: no helper needed. */
    /* s->cc_op is the cc value */
    tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
    /* env->cc_op already is the cc value */
    /* One-operand ops: only cc_dst is meaningful. */
    gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    /* Two-operand ops: cc_src and cc_dst. */
    gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
    /* Three-operand ops: cc_src, cc_dst and cc_vr. */
    gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
    /* unknown operation - assume 3 arguments and cc_op in env */
    gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
    tcg_temp_free_i32(local_cc_op);
    tcg_temp_free_i64(dummy);
    /* We now have cc in cc_op as constant */
670 static inline void decode_rr(DisasContext *s, uint64_t insn, int *r1, int *r2)
674 *r1 = (insn >> 4) & 0xf;
678 static inline TCGv_i64 decode_rx(DisasContext *s, uint64_t insn, int *r1,
679 int *x2, int *b2, int *d2)
683 *r1 = (insn >> 20) & 0xf;
684 *x2 = (insn >> 16) & 0xf;
685 *b2 = (insn >> 12) & 0xf;
688 return get_address(s, *x2, *b2, *d2);
691 static inline void decode_rs(DisasContext *s, uint64_t insn, int *r1, int *r3,
696 *r1 = (insn >> 20) & 0xf;
698 *r3 = (insn >> 16) & 0xf;
699 *b2 = (insn >> 12) & 0xf;
703 static inline TCGv_i64 decode_si(DisasContext *s, uint64_t insn, int *i2,
708 *i2 = (insn >> 16) & 0xff;
709 *b1 = (insn >> 12) & 0xf;
712 return get_address(s, 0, *b1, *d1);
715 static int use_goto_tb(DisasContext *s, uint64_t dest)
717 /* NOTE: we handle the case where the TB spans two pages here */
718 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
719 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
720 && !s->singlestep_enabled
721 && !(s->tb->cflags & CF_LAST_IO));
/* Emit a jump to PC as TB exit slot TB_NUM, chaining when allowed.
   NOTE(review): the else branch structure and the plain exit_tb(0) are
   elided in this excerpt. */
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc)
    if (use_goto_tb(s, pc)) {
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_i64(psw_addr, pc);
        /* TB pointer + slot index encodes the chaining target. */
        tcg_gen_exit_tb((tcg_target_long)s->tb + tb_num);
        /* jump to another page: currently not optimized */
        tcg_gen_movi_i64(psw_addr, pc);
739 static inline void account_noninline_branch(DisasContext *s, int cc_op)
741 #ifdef DEBUG_INLINE_BRANCHES
742 inline_branch_miss[cc_op]++;
746 static inline void account_inline_branch(DisasContext *s, int cc_op)
748 #ifdef DEBUG_INLINE_BRANCHES
749 inline_branch_hit[cc_op]++;
/* Table of mask values to comparison codes, given a comparison as input.
   For a true comparison CC=3 will never be set, but we treat this
   conservatively for possible use when CC=3 indicates overflow. */
/* Index = 4-bit branch mask (bit 8 = CC0/EQ, bit 4 = CC1/LT,
   bit 2 = CC2/GT, bit 1 = CC3); entries are the signed TCG condition.
   NOTE(review): the closing brace/terminator is elided in this excerpt. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_NEVER,     /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_NEVER,     /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NEVER,     /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_NEVER,     /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_NEVER,     /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_NEVER,     /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
/* NOTE(review): the per-row mask comments for the first three rows and
   the closing brace are elided in this excerpt. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison. */
/* NOTE(review): the switch statements, case labels, breaks, gotos and
   several declarations are elided in this excerpt; comments below mark
   the logical sections. */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
    enum cc_op old_cc_op = s->cc_op;
    /* Trivial masks: branch always or never; no operands needed. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->g1 = c->g2 = true;
    /* Find the TCG condition for the mask + cc op. */
    /* Signed compares map through ltgt_cond. */
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
        account_inline_branch(s, old_cc_op);
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Unsigned compares: same table, switched to unsigned conds. */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
        account_inline_branch(s, old_cc_op);
        /* Logic ops map through nz_cond. */
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
        account_inline_branch(s, old_cc_op);
        account_inline_branch(s, old_cc_op);
        account_inline_branch(s, old_cc_op);
        /* Test-under-mask style result: only bits 8 and 2 of MASK apply. */
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
        case 2: /* src != 0 -> one bit found */
        account_inline_branch(s, old_cc_op);
    /* Calculate cc value. */
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
    /* Load up the arguments of the comparison. */
    c->g1 = c->g2 = false;
    /* 32-bit one-operand ops: compare cc_dst against zero. */
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
    case CC_OP_LTUGTU_32:
        /* 32-bit two-operand ops: truncate cc_src and cc_dst. */
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        c->u.s64.b = tcg_const_i64(0);
    case CC_OP_LTUGTU_64:
        /* 64-bit two-operand ops can use the globals directly. */
        c->g1 = c->g2 = true;
        /* Test-under-mask: AND the operands into a fresh temp. */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        /* CC_OP_STATIC: compare the literal CC in cc_op against MASK. */
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            c->u.s32.b = tcg_const_i32(3);
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            c->u.s32.b = tcg_const_i32(2);
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            c->u.s32.b = tcg_const_i32(1);
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
        case 0x8 | 0x4: /* cc < 2 */
            c->u.s32.b = tcg_const_i32(2);
        case 0x8: /* cc == 0 */
            c->u.s32.b = tcg_const_i32(0);
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            c->u.s32.b = tcg_const_i32(0);
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
        case 0x4: /* cc == 1 */
            c->u.s32.b = tcg_const_i32(1);
        case 0x2 | 0x1: /* cc > 1 */
            c->u.s32.b = tcg_const_i32(1);
        case 0x2: /* cc == 2 */
            c->u.s32.b = tcg_const_i32(2);
        case 0x1: /* cc == 3 */
            c->u.s32.b = tcg_const_i32(3);
            /* CC is masked by something else: (8 >> cc) & mask. */
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
/* Release the temporaries held by a DisasCompare.
   NOTE(review): the g1/g2 guards and 64-vs-32 selection are elided in
   this excerpt -- globals (g1/g2 true) are presumably not freed. */
static void free_compare(DisasCompare *c)
        tcg_temp_free_i64(c->u.s64.a);
        tcg_temp_free_i32(c->u.s32.a);
        tcg_temp_free_i64(c->u.s64.b);
        tcg_temp_free_i32(c->u.s32.b);
/* Translate 0xB2xx (mostly privileged system) instructions; OP is the
   low opcode byte, INSN the raw 4-byte word.
   NOTE(review): the switch statement, breaks, several operand loads and
   the #else of the CONFIG_USER_ONLY guard are elided in this excerpt. */
static void disas_b2(CPUS390XState *env, DisasContext *s, int op,
#ifndef CONFIG_USER_ONLY
    TCGv_i64 tmp, tmp2, tmp3;
    TCGv_i32 tmp32_1, tmp32_2;
    r1 = (insn >> 4) & 0xf;
    LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2);
    case 0x2a: /* RRBE R1,R2 [RRE] */
        /* Set Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        tmp32_1 = load_reg32(r1);
        gen_helper_rrbe(cc_op, cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
    case 0x2b: /* SSKE R1,R2 [RRE] */
        /* Set Storage Key Extended */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        tmp32_1 = load_reg32(r1);
        gen_helper_sske(cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
    case 0x34: /* STCH ? */
        /* Store Subchannel */
        check_privileged(s);
        /* No channel subsystem emulated: report cc 3 (not operational). */
        gen_op_movi_cc(s, 3);
    case 0x46: /* STURA R1,R2 [RRE] */
        /* Store Using Real Address */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        tmp32_1 = load_reg32(r1);
        potential_page_fault(s);
        gen_helper_stura(cpu_env, tmp, tmp32_1);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
    case 0x50: /* CSP R1,R2 [RRE] */
        /* Compare And Swap And Purge */
        check_privileged(s);
        r1 = (insn >> 4) & 0xf;
        tmp32_1 = tcg_const_i32(r1);
        tmp32_2 = tcg_const_i32(r2);
        gen_helper_csp(cc_op, cpu_env, tmp32_1, tmp32_2);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
    case 0x5f: /* CHSC ? */
        /* Channel Subsystem Call */
        check_privileged(s);
        /* Not implemented: report cc 3. */
        gen_op_movi_cc(s, 3);
    case 0x78: /* STCKE D2(B2) [S] */
        /* Store Clock Extended */
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_stcke(cc_op, cpu_env, tmp);
        tcg_temp_free_i64(tmp);
    case 0x79: /* SACF D2(B2) [S] */
        /* Set Address Space Control Fast */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        potential_page_fault(s);
        gen_helper_sacf(cpu_env, tmp);
        tcg_temp_free_i64(tmp);
        /* addressing mode has changed, so end the block */
        s->is_jmp = DISAS_JUMP;
    case 0x7d: /* STSI D2,(B2) [S] */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        /* STSI function/selectors come in via r0 and r1. */
        tmp32_1 = load_reg32(0);
        tmp32_2 = load_reg32(1);
        potential_page_fault(s);
        gen_helper_stsi(cc_op, cpu_env, tmp, tmp32_1, tmp32_2);
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i32(tmp32_2);
    case 0xb1: /* STFL D2(B2) [S] */
        /* Store Facility List (CPU features) at 200 */
        check_privileged(s);
        /* Fixed facility word stored at absolute address 200. */
        tmp2 = tcg_const_i64(0xc0000000);
        tmp = tcg_const_i64(200);
        tcg_gen_qemu_st32(tmp2, tmp, get_mem_index(s));
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp);
    case 0xb2: /* LPSWE D2(B2) [S] */
        /* Load PSW Extended */
        check_privileged(s);
        decode_rs(s, insn, &r1, &r3, &b2, &d2);
        tmp = get_address(s, 0, b2, d2);
        tmp2 = tcg_temp_new_i64();
        tmp3 = tcg_temp_new_i64();
        /* Load the 16-byte PSW (mask then address) from memory. */
        tcg_gen_qemu_ld64(tmp2, tmp, get_mem_index(s));
        tcg_gen_addi_i64(tmp, tmp, 8);
        tcg_gen_qemu_ld64(tmp3, tmp, get_mem_index(s));
        gen_helper_load_psw(cpu_env, tmp2, tmp3);
        /* we need to keep cc_op intact */
        s->is_jmp = DISAS_JUMP;
        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(tmp2);
        tcg_temp_free_i64(tmp3);
    case 0x20: /* SERVC R1,R2 [RRE] */
        /* SCLP Service call (PV hypercall) */
        check_privileged(s);
        potential_page_fault(s);
        tmp32_1 = load_reg32(r2);
        gen_helper_servc(cc_op, cpu_env, tmp32_1, tmp);
        tcg_temp_free_i32(tmp32_1);
        tcg_temp_free_i64(tmp);
    /* Unknown 0xB2 sub-opcode. */
        LOG_DISAS("illegal b2 operation 0x%x\n", op);
        gen_illegal_opcode(s);
#ifndef CONFIG_USER_ONLY
/* Legacy top-level decode: read the major opcode byte at s->pc and
   dispatch.  NOTE(review): the switch, other opcode cases and breaks
   are elided in this excerpt. */
static void disas_s390_insn(CPUS390XState *env, DisasContext *s)
    opc = cpu_ldub_code(env, s->pc);
    LOG_DISAS("opc 0x%x\n", opc);
    /* 0xB2: 4-byte insn, extended op in bits 23..16. */
    insn = ld_code4(env, s->pc);
    op = (insn >> 16) & 0xff;
    disas_b2(env, s, op, insn);
    /* Unhandled major opcode: log and raise an illegal-opcode pgm. */
    qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%x\n", opc);
    gen_illegal_opcode(s);
1197 /* ====================================================================== */
1198 /* Define the insn format enumeration. */
1199 #define F0(N) FMT_##N,
1200 #define F1(N, X1) F0(N)
1201 #define F2(N, X1, X2) F0(N)
1202 #define F3(N, X1, X2, X3) F0(N)
1203 #define F4(N, X1, X2, X3, X4) F0(N)
1204 #define F5(N, X1, X2, X3, X4, X5) F0(N)
1207 #include "insn-format.def"
1217 /* Define a structure to hold the decoded fields. We'll store each inside
1218 an array indexed by an enum. In order to conserve memory, we'll arrange
1219 for fields that do not exist at the same time to overlap, thus the "C"
1220 for compact. For checking purposes there is an "O" for original index
1221 as well that will be applied to availability bitmaps. */
1223 enum DisasFieldIndexO {
1246 enum DisasFieldIndexC {
1277 struct DisasFields {
1280 unsigned presentC:16;
1281 unsigned int presentO;
1285 /* This is the way fields are to be accessed out of DisasFields. */
1286 #define have_field(S, F) have_field1((S), FLD_O_##F)
1287 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1289 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1291 return (f->presentO >> c) & 1;
1294 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1295 enum DisasFieldIndexC c)
1297 assert(have_field1(f, o));
1301 /* Describe the layout of each field in each format. */
1302 typedef struct DisasField {
1304 unsigned int size:8;
1305 unsigned int type:2;
1306 unsigned int indexC:6;
1307 enum DisasFieldIndexO indexO:8;
1310 typedef struct DisasFormatInfo {
1311 DisasField op[NUM_C_FIELD];
1314 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1315 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1316 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1317 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1318 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1319 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1320 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1321 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1322 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1323 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1324 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1325 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1326 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1327 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1329 #define F0(N) { { } },
1330 #define F1(N, X1) { { X1 } },
1331 #define F2(N, X1, X2) { { X1, X2 } },
1332 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1333 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1334 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1336 static const DisasFormatInfo format_info[] = {
1337 #include "insn-format.def"
1355 /* Generally, we'll extract operands into this structures, operate upon
1356 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1357 of routines below for more details. */
1359 bool g_out, g_out2, g_in1, g_in2;
1360 TCGv_i64 out, out2, in1, in2;
1364 /* Return values from translate_one, indicating the state of the TB. */
1366 /* Continue the TB. */
1368 /* We have emitted one or more goto_tb. No fixup required. */
1370 /* We are not using a goto_tb (for whatever reason), but have updated
1371 the PC (for whatever reason), so there's no need to do it again on
1374 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1375 updated the PC for the next instruction to be executed. */
1377 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1378 No following code will be executed. */
1382 typedef enum DisasFacility {
1383 FAC_Z, /* zarch (default) */
1384 FAC_CASS, /* compare and swap and store */
1385 FAC_CASS2, /* compare and swap and store 2*/
1386 FAC_DFP, /* decimal floating point */
1387 FAC_DFPR, /* decimal floating point rounding */
1388 FAC_DO, /* distinct operands */
1389 FAC_EE, /* execute extensions */
1390 FAC_EI, /* extended immediate */
1391 FAC_FPE, /* floating point extension */
1392 FAC_FPSSH, /* floating point support sign handling */
1393 FAC_FPRGR, /* FPR-GR transfer */
1394 FAC_GIE, /* general instructions extension */
1395 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1396 FAC_HW, /* high-word */
1397 FAC_IEEEE_SIM, /* IEEE exception sumilation */
1398 FAC_LOC, /* load/store on condition */
1399 FAC_LD, /* long displacement */
1400 FAC_PC, /* population count */
1401 FAC_SCF, /* store clock fast */
1402 FAC_SFLE, /* store facility list extended */
1408 DisasFacility fac:6;
1412 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1413 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1414 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1415 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1416 void (*help_cout)(DisasContext *, DisasOps *);
1417 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1422 /* ====================================================================== */
1423 /* Miscelaneous helpers, used by several operations. */
/* Load the shift amount for a shift insn into o->in2: the displacement
   alone when there is no base register, else (base + disp), in both
   cases masked to MASK (31 or 63).
   NOTE(review): the if/else structure and braces are elided in this
   excerpt. */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);
    /* No base register: the masked displacement is the amount. */
        o->in2 = tcg_const_i64(d2 & mask);
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
/* Emit a direct branch to DEST: fall through when DEST is the next
   insn, chain via goto_tb when allowed, else just update the PSW.
   NOTE(review): returns for the first branch and the goto_tb emission
   are elided in this excerpt. */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
    if (dest == s->next_pc) {
    if (use_goto_tb(s, dest)) {
        gen_update_cc_op(s);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((tcg_target_long)s->tb);
        return EXIT_GOTO_TB;
    tcg_gen_movi_i64(psw_addr, dest);
    return EXIT_PC_UPDATED;
/* Emit a conditional branch described by C: immediate target when
   IS_IMM (relative halfword count IMM), else register target CDEST.
   Handles always/never, branch-to-next, and chooses between chained
   goto_tb exits and movcond-based PC updates.
   NOTE(review): declarations, labels, gotos, braces and several
   branches are elided in this excerpt; comments mark the sections. */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
    uint64_t dest = s->pc + 2 * imm;
    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            gen_update_cc_op(s);
            lab = gen_new_label();
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            /* Branch not taken.  */
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
            /* Branch taken.  */
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
                tcg_gen_mov_i64(psw_addr, cdest);
            lab = gen_new_label();
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            /* Branch not taken.  */
            gen_update_cc_op(s);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
            /* Taken path: immediate dest materialized here. */
                tcg_gen_movi_i64(psw_addr, dest);
            ret = EXIT_PC_UPDATED;
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */
        TCGv_i64 next = tcg_const_i64(s->next_pc);
            cdest = tcg_const_i64(dest);
            /* 64-bit comparison: select the new PSW address directly. */
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
            /* 32-bit comparison: widen the setcond result, then select. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
            tcg_temp_free_i64(cdest);
        tcg_temp_free_i64(next);
        ret = EXIT_PC_UPDATED;
1581 /* ====================================================================== */
1582 /* The operations. These perform the bulk of the work for any insn,
1583 usually after the operands have been loaded and output initialized. */
/* Integer absolute value: out = |in2|, via helper.  */
1585 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1587 gen_helper_abs_i64(o->out, o->in2);
/* Float32 absolute value: clear bit 31 of the single-precision image.  */
1591 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1593 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
/* Float64 absolute value: clear the sign bit (bit 63).  */
1597 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1599 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
/* Float128 absolute value: clear the sign bit in the high doubleword,
   copy the low doubleword through unchanged.  */
1603 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1605 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1606 tcg_gen_mov_i64(o->out2, o->in2);
/* 64-bit integer addition: out = in1 + in2.  */
1610 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1612 tcg_gen_add_i64(o->out, o->in1, o->in2);
/* Add with carry: out = in1 + in2 + carry.  The carry is recovered from
   the materialized condition code as cc_op >> 1.
   NOTE(review): assumes the relevant CC encoding places the carry in
   bit 1 of the 2-bit CC — confirm against the CC_OP_ADDU compute.  */
1616 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1620 tcg_gen_add_i64(o->out, o->in1, o->in2);
1622 /* XXX possible optimization point */
1624 cc = tcg_temp_new_i64();
1625 tcg_gen_extu_i32_i64(cc, cc_op);
1626 tcg_gen_shri_i64(cc, cc, 1);
1628 tcg_gen_add_i64(o->out, o->out, cc);
1629 tcg_temp_free_i64(cc);
/* Float32 add, via softfloat helper (sets FP exceptions in env).  */
1633 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1635 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
/* Float64 add, via softfloat helper.  */
1639 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1641 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
/* Float128 add: 128-bit operands passed as hi/lo pairs; the low half
   of the result comes back through return_low128.  */
1645 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1647 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1648 return_low128(o->out2);
/* 64-bit bitwise AND: out = in1 & in2.  */
1652 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1654 tcg_gen_and_i64(o->out, o->in1, o->in2);
/* AND-immediate into a sub-field of the register.  insn->data packs the
   field position: low byte = shift, high bits = field size in bits.
   The immediate is shifted into place and all bits outside the field
   are forced to 1 so the AND leaves them untouched.  */
1658 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1660 int shift = s->insn->data & 0xff;
1661 int size = s->insn->data >> 8;
1662 uint64_t mask = ((1ull << size) - 1) << shift;
1665 tcg_gen_shli_i64(o->in2, o->in2, shift);
1666 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1667 tcg_gen_and_i64(o->out, o->in1, o->in2);
1669 /* Produce the CC from only the bits manipulated. */
1670 tcg_gen_andi_i64(cc_dst, o->out, mask);
1671 set_cc_nz_u64(s, cc_dst);
/* BRANCH AND SAVE: store the link information for the next sequential
   instruction, then branch to in2 if a branch target was supplied
   (r2 != 0 leaves in2 used; r2 == 0 means "no branch").  */
1675 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1677 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1678 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1679 tcg_gen_mov_i64(psw_addr, o->in2);
1680 return EXIT_PC_UPDATED;
/* BAS with immediate (relative) target: save link info and take a
   direct goto to pc + 2*i2 (i2 counts halfwords).  */
1686 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1688 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1689 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
/* BRANCH ON CONDITION: decode the m1 mask into a DisasCompare and let
   help_branch emit the conditional control flow.  The target is either
   the i2 immediate (relative) or the computed address in in2.  */
1692 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1694 int m1 = get_field(s->fields, m1);
1695 bool is_imm = have_field(s->fields, i2);
1696 int imm = is_imm ? get_field(s->fields, i2) : 0;
1699 disas_jcc(s, &c, m1);
1700 return help_branch(s, &c, is_imm, imm, o->in2);
/* BRANCH ON COUNT (32-bit): decrement the low 32 bits of r1 and branch
   while the decremented value is non-zero.  The comparison is done on
   the truncated 32-bit value.  */
1703 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1705 int r1 = get_field(s->fields, r1);
1706 bool is_imm = have_field(s->fields, i2);
1707 int imm = is_imm ? get_field(s->fields, i2) : 0;
1711 c.cond = TCG_COND_NE;
1716 t = tcg_temp_new_i64();
1717 tcg_gen_subi_i64(t, regs[r1], 1);
1718 store_reg32_i64(r1, t);
1719 c.u.s32.a = tcg_temp_new_i32();
1720 c.u.s32.b = tcg_const_i32(0);
1721 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1722 tcg_temp_free_i64(t);
1724 return help_branch(s, &c, is_imm, imm, o->in2);
/* BRANCH ON COUNT (64-bit): decrement r1 in place and branch while
   non-zero.  Note the comparison aliases regs[r1] directly.  */
1727 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1729 int r1 = get_field(s->fields, r1);
1730 bool is_imm = have_field(s->fields, i2);
1731 int imm = is_imm ? get_field(s->fields, i2) : 0;
1734 c.cond = TCG_COND_NE;
1739 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1740 c.u.s64.a = regs[r1];
1741 c.u.s64.b = tcg_const_i64(0);
1743 return help_branch(s, &c, is_imm, imm, o->in2);
/* Float32 compare: helper writes the condition code directly.  */
1746 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1748 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
/* Float64 compare.  */
1753 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1755 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
/* Float128 compare: both 128-bit operands passed as hi/lo pairs.  */
1760 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1762 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
/* Convert float32 -> int32 with rounding mode m3; CC reflects the
   source operand (NaN handling) via gen_set_cc_nz_f32.  */
1767 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1769 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1770 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1771 tcg_temp_free_i32(m3);
1772 gen_set_cc_nz_f32(s, o->in2);
/* Convert float64 -> int32.  */
1776 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1778 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1779 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1780 tcg_temp_free_i32(m3);
1781 gen_set_cc_nz_f64(s, o->in2);
/* Convert float128 -> int32.  */
1785 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1787 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1788 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1789 tcg_temp_free_i32(m3);
1790 gen_set_cc_nz_f128(s, o->in1, o->in2);
/* Convert float32 -> int64.  */
1794 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1796 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1797 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1798 tcg_temp_free_i32(m3);
1799 gen_set_cc_nz_f32(s, o->in2);
/* Convert float64 -> int64.  */
1803 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1805 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1806 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1807 tcg_temp_free_i32(m3);
1808 gen_set_cc_nz_f64(s, o->in2);
/* Convert float128 -> int64.  */
1812 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1814 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1815 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1816 tcg_temp_free_i32(m3);
1817 gen_set_cc_nz_f128(s, o->in1, o->in2);
/* Convert int64 -> float32 (no CC change for these directions).  */
1821 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1823 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1824 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1825 tcg_temp_free_i32(m3);
/* Convert int64 -> float64.  */
1829 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1831 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1832 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1833 tcg_temp_free_i32(m3);
/* Convert int64 -> float128; low half returned via return_low128.  */
1837 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1839 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1840 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1841 tcg_temp_free_i32(m3);
1842 return_low128(o->out2);
/* CHECKSUM: helper computes the checksum and returns the number of
   bytes consumed; advance the r2 address and shrink the r2+1 length
   register by that amount.  May fault (memory operand).  */
1846 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1848 int r2 = get_field(s->fields, r2);
1849 TCGv_i64 len = tcg_temp_new_i64();
1851 potential_page_fault(s);
1852 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1854 return_low128(o->out);
1856 tcg_gen_add_i64(regs[r2], regs[r2], len);
1857 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1858 tcg_temp_free_i64(len);
/* COMPARE LOGICAL (character): for operand lengths of 1/2/4/8 bytes,
   inline the two loads and set CC via an unsigned compare; any other
   length falls back to the byte-loop helper.  */
1863 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1865 int l = get_field(s->fields, l1);
1870 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1871 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1874 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1875 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1878 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1879 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1882 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1883 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1886 potential_page_fault(s);
1887 vl = tcg_const_i32(l);
1888 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1889 tcg_temp_free_i32(vl);
1893 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
/* COMPARE LOGICAL LONG EXTENDED: entirely in the helper, which sets
   the CC; register numbers are passed as constants.  */
1897 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1899 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1900 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1901 potential_page_fault(s);
1902 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1903 tcg_temp_free_i32(r1);
1904 tcg_temp_free_i32(r3);
/* COMPARE LOGICAL CHARACTERS UNDER MASK: helper compares the bytes of
   the (truncated 32-bit) register selected by mask m3 against memory
   and sets the CC.  */
1909 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1911 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1912 TCGv_i32 t1 = tcg_temp_new_i32();
1913 tcg_gen_trunc_i64_i32(t1, o->in1);
1914 potential_page_fault(s);
1915 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1917 tcg_temp_free_i32(t1);
1918 tcg_temp_free_i32(m3);
/* COMPARE LOGICAL STRING: helper returns updated addresses (second one
   via the low-128 mechanism); r0 supplies the terminating byte.  */
1922 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1924 potential_page_fault(s);
1925 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1927 return_low128(o->in2);
/* COMPARE AND SWAP (32-bit): helper does the atomic compare/exchange
   with regs[r3] as the replacement value and sets the CC.  */
1931 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1933 int r3 = get_field(s->fields, r3);
1934 potential_page_fault(s);
1935 gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
/* COMPARE AND SWAP (64-bit).  */
1940 static ExitStatus op_csg(DisasContext *s, DisasOps *o)
1942 int r3 = get_field(s->fields, r3);
1943 potential_page_fault(s);
1944 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
/* COMPARE DOUBLE AND SWAP (32-bit pair): the r3:r3+1 pair is packed
   into one 64-bit value (r3 in the high half) and the operation is
   implemented as a 64-bit CSG.  */
1949 static ExitStatus op_cds(DisasContext *s, DisasOps *o)
1951 int r3 = get_field(s->fields, r3);
1952 TCGv_i64 in3 = tcg_temp_new_i64();
1953 tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
1954 potential_page_fault(s);
1955 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
1956 tcg_temp_free_i64(in3);
/* COMPARE DOUBLE AND SWAP (64-bit pair, 128-bit): helper only.  */
1961 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1963 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1964 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1965 potential_page_fault(s);
1966 /* XXX rewrite in tcg */
1967 gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
/* CONVERT TO DECIMAL: helper converts the low 32 bits of in1 to a
   packed-decimal doubleword, which is stored at address in2.  */
1972 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
1974 TCGv_i64 t1 = tcg_temp_new_i64();
1975 TCGv_i32 t2 = tcg_temp_new_i32();
1976 tcg_gen_trunc_i64_i32(t2, o->in1);
1977 gen_helper_cvd(t1, t2);
1978 tcg_temp_free_i32(t2);
1979 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
1980 tcg_temp_free_i64(t1);
1984 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): only the function code (low 12 bits of d2)
   is passed; r1/r2 carry the hypercall arguments.  */
1985 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
1989 check_privileged(s);
1990 potential_page_fault(s);
1992 /* We pretend the format is RX_a so that D2 is the field we want. */
1993 tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
1994 gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
1995 tcg_temp_free_i32(tmp);
/* Signed 32-bit divide: helper returns quotient/remainder as a pair
   (second value via the low-128 mechanism).  */
2000 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2002 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2003 return_low128(o->out);
/* Unsigned 32-bit divide.  */
2007 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2009 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2010 return_low128(o->out);
/* Signed 64-bit divide.  */
2014 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2016 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2017 return_low128(o->out);
/* Unsigned 128/64 divide: the 128-bit dividend lives in the out/out2
   pair (preloaded by the operand hooks), divisor in in2.  */
2021 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2023 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2024 return_low128(o->out);
/* Float32 divide.  */
2028 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2030 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
/* Float64 divide.  */
2034 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2036 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
/* Float128 divide: hi/lo pairs in and out.  */
2040 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2042 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2043 return_low128(o->out2);
/* EXTRACT ACCESS REGISTER: zero-extended 32-bit load from aregs[r2].  */
2047 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2049 int r2 = get_field(s->fields, r2);
2050 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
/* EXTRACT FPC: read the floating-point control register.  */
2054 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2056 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
/* EXECUTE: implemented in a helper that interprets the target
   instruction with the modified byte; next_pc is passed so the helper
   can resume correctly.  */
2060 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2062 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2063 tb->flags, (ab)use the tb->cs_base field as the address of
2064 the template in memory, and grab 8 bits of tb->flags/cflags for
2065 the contents of the register. We would then recognize all this
2066 in gen_intermediate_code_internal, generating code for exactly
2067 one instruction. This new TB then gets executed normally.
2069 On the other hand, this seems to be mostly used for modifying
2070 MVC inside of memcpy, which needs a helper call anyway. So
2071 perhaps this doesn't bear thinking about any further. */
2078 tmp = tcg_const_i64(s->next_pc);
2079 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2080 tcg_temp_free_i64(tmp);
/* FIND LEFTMOST ONE: r1 = count of leading zeros (64 if input is 0),
   r1+1 = input with the found bit cleared.  */
2086 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2088 /* We'll use the original input for cc computation, since we get to
2089 compare that against 0, which ought to be better than comparing
2090 the real output against 64. It also lets cc_dst be a convenient
2091 temporary during our computation. */
2092 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2)
2094 /* R1 = IN ? CLZ(IN) : 64. */
2095 gen_helper_clz(o->out, o->in2);
2097 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2098 value by 64, which is undefined. But since the shift is 64 iff the
2099 input is zero, we still get the correct result after and'ing. */
2100 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2101 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2102 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
/* INSERT CHARACTERS UNDER MASK: bytes selected by mask m3 are loaded
   from ascending memory and deposited into the register at positions
   determined by the mask; insn->data selects the base bit position
   (high vs low word variants).  Contiguous masks become one wide
   load; otherwise a byte-by-byte load/deposit sequence is used.
   The CC is derived from the inserted bits (CC_OP_ICM).  */
2106 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2108 int m3 = get_field(s->fields, m3);
2109 int pos, len, base = s->insn->data;
2110 TCGv_i64 tmp = tcg_temp_new_i64();
2115 /* Effectively a 32-bit load. */
2116 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2123 /* Effectively a 16-bit load. */
2124 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2132 /* Effectively an 8-bit load. */
2133 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2138 pos = base + ctz32(m3) * 8;
2139 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2140 ccm = ((1ull << len) - 1) << pos;
2144 /* This is going to be a sequence of loads and inserts. */
2145 pos = base + 32 - 8;
2149 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2150 tcg_gen_addi_i64(o->in2, o->in2, 1);
2151 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2154 m3 = (m3 << 1) & 0xf;
2160 tcg_gen_movi_i64(tmp, ccm);
2161 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2162 tcg_temp_free_i64(tmp);
/* Insert immediate into a bit-field of in1; insn->data packs the field
   as (size << 8) | shift, same encoding as op_andi/op_ori.  */
2166 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2168 int shift = s->insn->data & 0xff;
2169 int size = s->insn->data >> 8;
2170 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
/* INSERT PROGRAM MASK: build bits 32-39 of the result from the
   program mask (psw_mask bits repositioned via shl 20 / shr 36) and
   the current CC (shifted into bits 28-29 of the i64 image).  */
2174 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2179 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2181 t1 = tcg_temp_new_i64();
2182 tcg_gen_shli_i64(t1, psw_mask, 20);
2183 tcg_gen_shri_i64(t1, t1, 36);
2184 tcg_gen_or_i64(o->out, o->out, t1);
2186 tcg_gen_extu_i32_i64(t1, cc_op);
2187 tcg_gen_shli_i64(t1, t1, 28);
2188 tcg_gen_or_i64(o->out, o->out, t1);
2189 tcg_temp_free_i64(t1);
2193 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged): helper flushes the TLB.  */
2194 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2196 check_privileged(s);
2197 gen_helper_ipte(cpu_env, o->in1, o->in2);
/* INSERT STORAGE KEY EXTENDED (privileged).  */
2201 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2203 check_privileged(s);
2204 gen_helper_iske(o->out, cpu_env, o->in2);
/* Lengthen float32 -> float64.  */
2209 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2211 gen_helper_ldeb(o->out, cpu_env, o->in2);
/* Round float64 -> float32.  */
2215 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2217 gen_helper_ledb(o->out, cpu_env, o->in2);
/* Round float128 -> float64.  */
2221 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2223 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
/* Round float128 -> float32.  */
2227 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2229 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
/* Lengthen float64 -> float128 (low half via return_low128).  */
2233 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2235 gen_helper_lxdb(o->out, cpu_env, o->in2);
2236 return_low128(o->out2);
/* Lengthen float32 -> float128.  */
2240 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2242 gen_helper_lxeb(o->out, cpu_env, o->in2);
2243 return_low128(o->out2);
/* LOAD LOGICAL 31-BIT: mask to the low 31 bits.  */
2247 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2249 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
/* Memory loads of 1/2/4/8 bytes, signed and unsigned variants;
   address in in2, result in out.  */
2253 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2255 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2259 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2261 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2265 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2267 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2271 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2273 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2277 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2279 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2283 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2285 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2289 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2291 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2295 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit, privileged): helper loads control registers
   r1..r3 from memory at in2.  */
2296 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2298 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2299 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2300 check_privileged(s);
2301 potential_page_fault(s);
2302 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2303 tcg_temp_free_i32(r1);
2304 tcg_temp_free_i32(r3);
/* LOAD CONTROL (64-bit, privileged).  */
2308 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2310 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2311 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2312 check_privileged(s);
2313 potential_page_fault(s);
2314 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2315 tcg_temp_free_i32(r1);
2316 tcg_temp_free_i32(r3);
/* LOAD REAL ADDRESS (privileged): translation done in the helper.  */
2319 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2321 check_privileged(s);
2322 potential_page_fault(s);
2323 gen_helper_lra(o->out, cpu_env, o->in2);
/* LOAD PSW (privileged): load the two 32-bit halves of the short-form
   PSW, widen the mask to the 64-bit layout (shift into the high word),
   and install it — control flow does not return to this TB.  */
2328 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2332 check_privileged(s);
2334 t1 = tcg_temp_new_i64();
2335 t2 = tcg_temp_new_i64();
2336 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2337 tcg_gen_addi_i64(o->in2, o->in2, 4);
2338 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2339 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2340 tcg_gen_shli_i64(t1, t1, 32);
2341 gen_helper_load_psw(cpu_env, t1, t2);
2342 tcg_temp_free_i64(t1);
2343 tcg_temp_free_i64(t2);
2344 return EXIT_NORETURN;
/* LOAD ACCESS MULTIPLE: helper loads access registers r1..r3.  */
2348 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2350 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2351 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2352 potential_page_fault(s);
2353 gen_helper_lam(cpu_env, r1, o->in2, r3);
2354 tcg_temp_free_i32(r1);
2355 tcg_temp_free_i32(r3);
/* LOAD MULTIPLE (32-bit): loop over registers r1..r3, loading each
   low word from consecutive 4-byte slots at in2.  */
2359 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2361 int r1 = get_field(s->fields, r1);
2362 int r3 = get_field(s->fields, r3);
2363 TCGv_i64 t = tcg_temp_new_i64();
2364 TCGv_i64 t4 = tcg_const_i64(4);
2367 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2368 store_reg32_i64(r1, t);
2372 tcg_gen_add_i64(o->in2, o->in2, t4);
2376 tcg_temp_free_i64(t);
2377 tcg_temp_free_i64(t4);
/* LOAD MULTIPLE HIGH: same loop but each word goes to the register's
   high half (store_reg32h_i64).  */
2381 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2383 int r1 = get_field(s->fields, r1);
2384 int r3 = get_field(s->fields, r3);
2385 TCGv_i64 t = tcg_temp_new_i64();
2386 TCGv_i64 t4 = tcg_const_i64(4);
2389 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2390 store_reg32h_i64(r1, t);
2394 tcg_gen_add_i64(o->in2, o->in2, t4);
2398 tcg_temp_free_i64(t);
2399 tcg_temp_free_i64(t4);
/* LOAD MULTIPLE (64-bit): full doubleword loads, 8-byte stride.  */
2403 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2405 int r1 = get_field(s->fields, r1);
2406 int r3 = get_field(s->fields, r3);
2407 TCGv_i64 t8 = tcg_const_i64(8);
2410 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2414 tcg_gen_add_i64(o->in2, o->in2, t8);
2418 tcg_temp_free_i64(t8);
/* Register move: alias in2 as the output instead of copying; mark in2
   unused so the generic cleanup does not free a global.  */
2422 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2425 o->g_out = o->g_in2;
2426 TCGV_UNUSED_I64(o->in2);
/* 128-bit move: alias both input halves as the output pair, with the
   same global-flag bookkeeping as op_mov2.  */
2431 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2435 o->g_out = o->g_in1;
2436 o->g_out2 = o->g_in2;
2437 TCGV_UNUSED_I64(o->in1);
2438 TCGV_UNUSED_I64(o->in2);
2439 o->g_in1 = o->g_in2 = false;
/* MOVE (character): helper copies l1+1 bytes from in2 to addr1.  */
2443 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2445 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2446 potential_page_fault(s);
2447 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2448 tcg_temp_free_i32(l);
/* MOVE LONG: helper operates on the r1/r2 register pairs and sets
   the CC.  */
2452 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2454 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2455 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2456 potential_page_fault(s);
2457 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2458 tcg_temp_free_i32(r1);
2459 tcg_temp_free_i32(r2);
/* MOVE LONG EXTENDED.  */
2464 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2466 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2467 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2468 potential_page_fault(s);
2469 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2470 tcg_temp_free_i32(r1);
2471 tcg_temp_free_i32(r3);
2476 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY (privileged): cross-address-space copy; note the
   length register index comes from the l1 field.  */
2477 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2479 int r1 = get_field(s->fields, l1);
2480 check_privileged(s);
2481 potential_page_fault(s);
2482 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
/* MOVE TO SECONDARY (privileged).  */
2487 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2489 int r1 = get_field(s->fields, l1);
2490 check_privileged(s);
2491 potential_page_fault(s);
2492 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
/* MOVE PAGE: helper copies one page; r0 carries the option bits.  */
2498 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2500 potential_page_fault(s);
2501 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
/* MOVE STRING: helper returns the updated destination address, with
   the updated source address via the low-128 mechanism; r0 supplies
   the terminating byte.  */
2506 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2508 potential_page_fault(s);
2509 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2511 return_low128(o->in2);
/* 64x64 -> 64 multiply.  */
2515 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2517 tcg_gen_mul_i64(o->out, o->in1, o->in2);
/* 64x64 -> 128 multiply: low half via return_low128.  */
2521 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2523 gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
2524 return_low128(o->out2);
/* Float32 multiply.  */
2528 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2530 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
/* Float32 x float32 -> float64 multiply.  */
2534 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2536 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
/* Float64 multiply.  */
2540 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2542 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
/* Float128 multiply: hi/lo pairs in and out.  */
2546 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2548 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2549 return_low128(o->out2);
/* Float64 x float64 -> float128 multiply.  */
2553 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2555 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2556 return_low128(o->out2);
/* Float32 fused multiply-add: r3 operand loaded as a float32 image.  */
2560 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2562 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2563 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2564 tcg_temp_free_i64(r3);
/* Float64 fused multiply-add: r3 operand used directly from fregs.  */
2568 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2570 int r3 = get_field(s->fields, r3);
2571 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
/* Float32 fused multiply-subtract.  */
2575 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2577 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2578 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2579 tcg_temp_free_i64(r3);
/* Float64 fused multiply-subtract.  */
2583 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2585 int r3 = get_field(s->fields, r3);
2586 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
/* Integer negative-absolute: out = -|in2| via helper.  */
2590 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2592 gen_helper_nabs_i64(o->out, o->in2);
/* Float32 negative-absolute: force the sign bit (bit 31) on.  */
2596 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2598 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
/* Float64 negative-absolute: force bit 63 on.  */
2602 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2604 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
/* Float128 negative-absolute: set the sign in the high half, pass the
   low half through.  */
2608 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2610 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2611 tcg_gen_mov_i64(o->out2, o->in2);
/* AND (character): memory-to-memory AND in the helper, CC set there.  */
2615 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2617 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2618 potential_page_fault(s);
2619 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2620 tcg_temp_free_i32(l);
/* Integer negate: out = -in2.  */
2625 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2627 tcg_gen_neg_i64(o->out, o->in2);
/* Float32 negate: flip the sign bit.  */
2631 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2633 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
/* Float64 negate.  */
2637 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2639 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
/* Float128 negate: flip the sign in the high half only.  */
2643 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2645 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2646 tcg_gen_mov_i64(o->out2, o->in2);
/* OR (character): memory-to-memory OR in the helper.  */
2650 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2652 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2653 potential_page_fault(s);
2654 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2655 tcg_temp_free_i32(l);
/* 64-bit bitwise OR: out = in1 | in2.  */
2660 static ExitStatus op_or(DisasContext *s, DisasOps *o)
2662 tcg_gen_or_i64(o->out, o->in1, o->in2);
/* OR-immediate into a sub-field; insn->data packs (size << 8) | shift,
   same field encoding as op_andi.  CC comes from the affected bits.  */
2666 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2668 int shift = s->insn->data & 0xff;
2669 int size = s->insn->data >> 8;
2670 uint64_t mask = ((1ull << size) - 1) << shift;
2673 tcg_gen_shli_i64(o->in2, o->in2, shift);
2674 tcg_gen_or_i64(o->out, o->in1, o->in2);
2676 /* Produce the CC from only the bits manipulated. */
2677 tcg_gen_andi_i64(cc_dst, o->out, mask);
2678 set_cc_nz_u64(s, cc_dst);
2682 #ifndef CONFIG_USER_ONLY
/* PURGE TLB (privileged).  */
2683 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
2685 check_privileged(s);
2686 gen_helper_ptlb(cpu_env);
/* Byte-reverse the low 16 bits.  */
2691 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
2693 tcg_gen_bswap16_i64(o->out, o->in2);
/* Byte-reverse the low 32 bits.  */
2697 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
2699 tcg_gen_bswap32_i64(o->out, o->in2);
/* Byte-reverse the full 64 bits.  */
2703 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
2705 tcg_gen_bswap64_i64(o->out, o->in2);
/* ROTATE LEFT (32-bit): done in i32 temporaries, result zero-extended
   back to 64 bits.  */
2709 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2711 TCGv_i32 t1 = tcg_temp_new_i32();
2712 TCGv_i32 t2 = tcg_temp_new_i32();
2713 TCGv_i32 to = tcg_temp_new_i32();
2714 tcg_gen_trunc_i64_i32(t1, o->in1);
2715 tcg_gen_trunc_i64_i32(t2, o->in2);
2716 tcg_gen_rotl_i32(to, t1, t2);
2717 tcg_gen_extu_i32_i64(o->out, to);
2718 tcg_temp_free_i32(t1);
2719 tcg_temp_free_i32(t2);
2720 tcg_temp_free_i32(to);
/* ROTATE LEFT (64-bit).  */
2724 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
2726 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
/* SET ACCESS REGISTER: store the low 32 bits of in2 into aregs[r1].  */
2730 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
2732 int r1 = get_field(s->fields, r1);
2733 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
/* Float32 subtract.  */
2737 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
2739 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
/* Float64 subtract.  */
2743 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
2745 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
/* Float128 subtract: hi/lo pairs in and out.  */
2749 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
2751 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2752 return_low128(o->out2);
/* Float32 square root.  */
2756 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
2758 gen_helper_sqeb(o->out, cpu_env, o->in2);
/* Float64 square root.  */
2762 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
2764 gen_helper_sqdb(o->out, cpu_env, o->in2);
/* Float128 square root.  */
2768 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
2770 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
2771 return_low128(o->out2);
2775 #ifndef CONFIG_USER_ONLY
/* SIGNAL PROCESSOR (privileged): helper performs the order and sets
   the CC.  */
2776 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
2778 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2779 check_privileged(s);
2780 potential_page_fault(s);
2781 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
2782 tcg_temp_free_i32(r1);
/* SHIFT LEFT SINGLE (arithmetic): the sign bit (selected by
   insn->data: 31 or 63) is preserved from the source; only the
   remaining bits are shifted.  CC is computed from the operands.  */
2787 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
2789 uint64_t sign = 1ull << s->insn->data;
2790 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
2791 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
2792 tcg_gen_shl_i64(o->out, o->in1, o->in2);
2793 /* The arithmetic left shift is curious in that it does not affect
2794 the sign bit. Copy that over from the source unchanged. */
2795 tcg_gen_andi_i64(o->out, o->out, ~sign);
2796 tcg_gen_andi_i64(o->in1, o->in1, sign);
2797 tcg_gen_or_i64(o->out, o->out, o->in1);
/* Logical shift left.  */
2801 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
2803 tcg_gen_shl_i64(o->out, o->in1, o->in2);
/* Arithmetic shift right.  */
2807 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
2809 tcg_gen_sar_i64(o->out, o->in1, o->in2);
/* Logical shift right.  */
2813 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
2815 tcg_gen_shr_i64(o->out, o->in1, o->in2);
/* SET FPC: install a new floating-point control value via helper.  */
2819 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
2821 gen_helper_sfpc(cpu_env, o->in2);
2825 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS (privileged): bits 56-59 of the second
   operand become the PSW key field.  */
2826 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
2828 check_privileged(s);
2829 tcg_gen_shri_i64(o->in2, o->in2, 4);
2830 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
/* SET SYSTEM MASK (privileged): replace the top byte of psw_mask.  */
2834 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
2836 check_privileged(s);
2837 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
/* STORE CPU ADDRESS (privileged).  */
2841 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
2843 check_privileged(s);
2844 /* ??? Surely cpu address != cpu number. In any case the previous
2845 version of this stored more than the required half-word, so it
2846 is unlikely this has ever been tested. */
2847 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
/* STORE CLOCK: helper reads the TOD clock; CC forced to 0.  */
2851 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
2853 gen_helper_stck(o->out, cpu_env);
2854 /* ??? We don't implement clock states. */
2855 gen_op_movi_cc(s, 0);
/* SET CLOCK COMPARATOR (privileged).  */
2859 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
2861 check_privileged(s);
2862 gen_helper_sckc(cpu_env, o->in2);
/* STORE CLOCK COMPARATOR (privileged).  */
2866 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
2868 check_privileged(s);
2869 gen_helper_stckc(o->out, cpu_env);
/* STORE CONTROL (64-bit, privileged): store control regs r1..r3.  */
2873 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
2875 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2876 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2877 check_privileged(s);
2878 potential_page_fault(s);
2879 gen_helper_stctg(cpu_env, r1, o->in2, r3);
2880 tcg_temp_free_i32(r1);
2881 tcg_temp_free_i32(r3);
/* STORE CONTROL (32-bit, privileged).  */
2885 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
2887 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2888 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2889 check_privileged(s);
2890 potential_page_fault(s);
2891 gen_helper_stctl(cpu_env, r1, o->in2, r3);
2892 tcg_temp_free_i32(r1);
2893 tcg_temp_free_i32(r3);
/* STORE CPU ID (privileged) — reads cpu_num like op_stap.  */
2897 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
2899 check_privileged(s);
2900 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
/* SET CPU TIMER (privileged).  */
2904 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
2906 check_privileged(s);
2907 gen_helper_spt(cpu_env, o->in2);
/* STORE CPU TIMER (privileged).  */
2911 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
2913 check_privileged(s);
2914 gen_helper_stpt(o->out, cpu_env);
/* SET PREFIX (privileged).  */
2918 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
2920 check_privileged(s);
2921 gen_helper_spx(cpu_env, o->in2);
/* STORE PREFIX (privileged): psa masked to its architected bits.  */
2925 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
2927 check_privileged(s);
2928 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
2929 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
/* STORE THEN AND/OR SYSTEM MASK (privileged): store the current
   system-mask byte first, then apply the i2 immediate — AND for
   opcode 0xac (STNSM), OR otherwise (STOSM).  */
2933 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
2935 uint64_t i2 = get_field(s->fields, i2);
2938 check_privileged(s);
2940 /* It is important to do what the instruction name says: STORE THEN.
2941 If we let the output hook perform the store then if we fault and
2942 restart, we'll have the wrong SYSTEM MASK in place. */
2943 t = tcg_temp_new_i64();
2944 tcg_gen_shri_i64(t, psw_mask, 56);
2945 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
2946 tcg_temp_free_i64(t);
2948 if (s->fields->op == 0xac) {
2949 tcg_gen_andi_i64(psw_mask, psw_mask,
2950 (i2 << 56) | 0x00ffffffffffffffull);
2952 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
/* Memory stores of 1/2/4/8 bytes; value in in1, address in in2.  */
2958 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
2960 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
2964 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
2966 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
2970 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
2972 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
2976 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
2978 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
/* STORE ACCESS MULTIPLE: helper stores access registers r1..r3.  */
2982 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
2984 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2985 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2986 potential_page_fault(s);
2987 gen_helper_stam(cpu_env, r1, o->in2, r3);
2988 tcg_temp_free_i32(r1);
2989 tcg_temp_free_i32(r3);
/* STORE CHARACTERS UNDER MASK: mirror of op_icm — contiguous masks
   become one wide store of the shifted register field, otherwise a
   byte-by-byte shift/store loop.  insn->data gives the base bit
   position (high vs low word variants).  */
2993 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
2995 int m3 = get_field(s->fields, m3);
2996 int pos, base = s->insn->data;
2997 TCGv_i64 tmp = tcg_temp_new_i64();
2999 pos = base + ctz32(m3) * 8;
3002 /* Effectively a 32-bit store. */
3003 tcg_gen_shri_i64(tmp, o->in1, pos);
3004 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3010 /* Effectively a 16-bit store. */
3011 tcg_gen_shri_i64(tmp, o->in1, pos);
3012 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3019 /* Effectively an 8-bit store. */
3020 tcg_gen_shri_i64(tmp, o->in1, pos);
3021 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3025 /* This is going to be a sequence of shifts and stores. */
3026 pos = base + 32 - 8;
3029 tcg_gen_shri_i64(tmp, o->in1, pos);
3030 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3031 tcg_gen_addi_i64(o->in2, o->in2, 1);
3033 m3 = (m3 << 1) & 0xf;
3038 tcg_temp_free_i64(tmp);
/* STORE MULTIPLE (STM/STMG): store regs r1..r3 at consecutive addresses,
   element size taken from insn->data (4 or 8 bytes).  NOTE(review): the
   loop construct iterating r1..r3 is elided from this listing. */
3042 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3044     int r1 = get_field(s->fields, r1);
3045     int r3 = get_field(s->fields, r3);
3046     int size = s->insn->data;
3047     TCGv_i64 tsize = tcg_const_i64(size);
3051             tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3053             tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
     /* Step the address by the element size for the next register. */
3058         tcg_gen_add_i64(o->in2, o->in2, tsize);
3062     tcg_temp_free_i64(tsize);
/* STORE MULTIPLE HIGH (STMH): store the high 32 bits of regs r1..r3,
   4 bytes apart.  The shift-left by 32 plus 32-bit store writes the
   register's upper half.  NOTE(review): the loop lines are elided. */
3066 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3068     int r1 = get_field(s->fields, r1);
3069     int r3 = get_field(s->fields, r3);
3070     TCGv_i64 t = tcg_temp_new_i64();
3071     TCGv_i64 t4 = tcg_const_i64(4);
3072     TCGv_i64 t32 = tcg_const_i64(32);
3075         tcg_gen_shl_i64(t, regs[r1], t32);
3076         tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3080         tcg_gen_add_i64(o->in2, o->in2, t4);
3084     tcg_temp_free_i64(t);
3085     tcg_temp_free_i64(t4);
3086     tcg_temp_free_i64(t32);
/* SEARCH STRING (SRST): helper call; end character comes from r0.
   Second result is returned through the low-128 mechanism. */
3090 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3092     potential_page_fault(s);
3093     gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3095     return_low128(o->in2);
/* SUBTRACT: out = in1 - in2.  CC handling is done by the cout hook. */
3099 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3101     tcg_gen_sub_i64(o->out, o->in1, o->in2);
/* SUBTRACT WITH BORROW: computed as in1 + ~in2 + borrow, where the
   borrow is bit 1 of the current condition code. */
3105 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3110     tcg_gen_not_i64(o->in2, o->in2);
3111     tcg_gen_add_i64(o->out, o->in1, o->in2);
3113     /* XXX possible optimization point */
     /* Extract the borrow from cc_op (CC bit 1) and add it in. */
3115     cc = tcg_temp_new_i64();
3116     tcg_gen_extu_i32_i64(cc, cc_op);
3117     tcg_gen_shri_i64(cc, cc, 1);
3118     tcg_gen_add_i64(o->out, o->out, cc);
3119     tcg_temp_free_i64(cc);
/* SUPERVISOR CALL (SVC): record the SVC code and instruction length in
   the CPU state, then raise the EXCP_SVC exception.  Ends the TB. */
3123 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3130     t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3131     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3132     tcg_temp_free_i32(t);
     /* ilen = size of this insn, needed to resume after the syscall. */
3134     t = tcg_const_i32(s->next_pc - s->pc);
3135     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3136     tcg_temp_free_i32(t);
3138     gen_exception(EXCP_SVC);
3139     return EXIT_NORETURN;
/* TEST DATA CLASS (TCEB/TCDB/TCXB) for 32/64/128-bit BFP values; the
   helpers compute the condition code directly into cc_op. */
3142 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3144     gen_helper_tceb(cc_op, o->in1, o->in2);
3149 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3151     gen_helper_tcdb(cc_op, o->in1, o->in2);
3156 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3158     gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3163 #ifndef CONFIG_USER_ONLY
/* TEST PROTECTION (TPROT, system-mode only): helper sets cc_op. */
3164 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3166     potential_page_fault(s);
3167     gen_helper_tprot(cc_op, o->addr1, o->in2);
/* TRANSLATE (TR): translate l1+1 bytes at addr1 using the table at in2. */
3173 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3175     TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3176     potential_page_fault(s);
3177     gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3178     tcg_temp_free_i32(l);
/* UNPACK (UNPK): helper-based decimal unpack of l1 bytes. */
3183 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3185     TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3186     potential_page_fault(s);
3187     gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3188     tcg_temp_free_i32(l);
/* EXCLUSIVE OR CHARACTER (XC): memory-to-memory XOR via helper, which
   also produces the condition code in cc_op. */
3192 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3194     TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3195     potential_page_fault(s);
3196     gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
3197     tcg_temp_free_i32(l);
/* EXCLUSIVE OR: out = in1 ^ in2. */
3202 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3204     tcg_gen_xor_i64(o->out, o->in1, o->in2);
/* XOR IMMEDIATE (XIHF/XILF style): insn->data encodes the field's
   size (high byte) and shift (low byte) within the 64-bit register;
   the CC is computed from only the bits manipulated. */
3208 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3210     int shift = s->insn->data & 0xff;
3211     int size = s->insn->data >> 8;
3212     uint64_t mask = ((1ull << size) - 1) << shift;
3215     tcg_gen_shli_i64(o->in2, o->in2, shift);
3216     tcg_gen_xor_i64(o->out, o->in1, o->in2);
3218     /* Produce the CC from only the bits manipulated.  */
3219     tcg_gen_andi_i64(cc_dst, o->out, mask);
3220     set_cc_nz_u64(s, cc_dst);
/* Produce constant zero into out (and, for op_zero2, presumably out2 as
   well -- the second line is elided from this listing; verify). */
3224 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
3226     o->out = tcg_const_i64(0);
3230 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
3232     o->out = tcg_const_i64(0);
3238 /* ====================================================================== */
3239 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
3240    the original inputs), update the various cc data structures in order to
3241    be able to compute the new condition code.  */
/* Each cout_* helper simply records the appropriate CC_OP_* plus the
   one, two or three values it needs via gen_op_update{1,2,3}_cc_i64. */
3243 static void cout_abs32(DisasContext *s, DisasOps *o)
3245     gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
3248 static void cout_abs64(DisasContext *s, DisasOps *o)
3250     gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
3253 static void cout_adds32(DisasContext *s, DisasOps *o)
3255     gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
3258 static void cout_adds64(DisasContext *s, DisasOps *o)
3260     gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
3263 static void cout_addu32(DisasContext *s, DisasOps *o)
3265     gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
3268 static void cout_addu64(DisasContext *s, DisasOps *o)
3270     gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
3273 static void cout_addc32(DisasContext *s, DisasOps *o)
3275     gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
3278 static void cout_addc64(DisasContext *s, DisasOps *o)
3280     gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
3283 static void cout_cmps32(DisasContext *s, DisasOps *o)
3285     gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
3288 static void cout_cmps64(DisasContext *s, DisasOps *o)
3290     gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
3293 static void cout_cmpu32(DisasContext *s, DisasOps *o)
3295     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
3298 static void cout_cmpu64(DisasContext *s, DisasOps *o)
3300     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
3303 static void cout_f32(DisasContext *s, DisasOps *o)
3305     gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
3308 static void cout_f64(DisasContext *s, DisasOps *o)
3310     gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
3313 static void cout_f128(DisasContext *s, DisasOps *o)
3315     gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
3318 static void cout_nabs32(DisasContext *s, DisasOps *o)
3320     gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
3323 static void cout_nabs64(DisasContext *s, DisasOps *o)
3325     gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
3328 static void cout_neg32(DisasContext *s, DisasOps *o)
3330     gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
3333 static void cout_neg64(DisasContext *s, DisasOps *o)
3335     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
/* nz32 zero-extends through cc_dst first so only the low 32 bits count. */
3338 static void cout_nz32(DisasContext *s, DisasOps *o)
3340     tcg_gen_ext32u_i64(cc_dst, o->out);
3341     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
3344 static void cout_nz64(DisasContext *s, DisasOps *o)
3346     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
3349 static void cout_s32(DisasContext *s, DisasOps *o)
3351     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
3354 static void cout_s64(DisasContext *s, DisasOps *o)
3356     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
3359 static void cout_subs32(DisasContext *s, DisasOps *o)
3361     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
3364 static void cout_subs64(DisasContext *s, DisasOps *o)
3366     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3369 static void cout_subu32(DisasContext *s, DisasOps *o)
3371     gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3374 static void cout_subu64(DisasContext *s, DisasOps *o)
3376     gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3379 static void cout_subb32(DisasContext *s, DisasOps *o)
3381     gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3384 static void cout_subb64(DisasContext *s, DisasOps *o)
3386     gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
3389 static void cout_tm32(DisasContext *s, DisasOps *o)
3391     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3394 static void cout_tm64(DisasContext *s, DisasOps *o)
3396     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3399 /* ====================================================================== */
3400 /* The "PREPeration" generators.  These initialize the DisasOps.OUT fields
3401    with the TCG register to which we will write.  Used in combination with
3402    the "wout" generators, in some cases we need a new temporary, and in
3403    some cases we can write to a TCG global.  */
/* prep_new* allocate fresh temporaries; prep_r1/f1/x1 alias TCG globals
   and mark g_out/g_out2 so translate_one will not free them. */
3405 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
3407     o->out = tcg_temp_new_i64();
3410 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
3412     o->out = tcg_temp_new_i64();
3413     o->out2 = tcg_temp_new_i64();
3416 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3418     o->out = regs[get_field(f, r1)];
3422 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
3424     /* ??? Specification exception: r1 must be even.  */
3425     int r1 = get_field(f, r1);
3427     o->out2 = regs[(r1 + 1) & 15];
3428     o->g_out = o->g_out2 = true;
3431 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3433     o->out = fregs[get_field(f, r1)];
3437 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3439     /* ??? Specification exception: r1 must be < 14.  */
3440     int r1 = get_field(f, r1);
3442     o->out2 = fregs[(r1 + 2) & 15];
3443     o->g_out = o->g_out2 = true;
3446 /* ====================================================================== */
3447 /* The "Write OUTput" generators.  These generally perform some non-trivial
3448    copy of data to TCG globals, or to main memory.  The trivial cases are
3449    generally handled by having a "prep" generator install the TCG global
3450    as the destination of the operation.  */
3452 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3454     store_reg(get_field(f, r1), o->out);
/* Insert only the low 8/16 bits of out into r1, preserving the rest. */
3457 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3459     int r1 = get_field(f, r1);
3460     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
3463 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3465     int r1 = get_field(f, r1);
3466     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
3469 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3471     store_reg32_i64(get_field(f, r1), o->out);
/* Write a 32-bit pair to the even/odd register pair r1, r1+1. */
3474 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
3476     /* ??? Specification exception: r1 must be even.  */
3477     int r1 = get_field(f, r1);
3478     store_reg32_i64(r1, o->out);
3479     store_reg32_i64((r1 + 1) & 15, o->out2);
/* Split one 64-bit value across the pair: low half to r1+1, high to r1. */
3482 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3484     /* ??? Specification exception: r1 must be even.  */
3485     int r1 = get_field(f, r1);
3486     store_reg32_i64((r1 + 1) & 15, o->out);
3487     tcg_gen_shri_i64(o->out, o->out, 32);
3488     store_reg32_i64(r1, o->out);
3491 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3493     store_freg32_i64(get_field(f, r1), o->out);
3496 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3498     store_freg(get_field(f, r1), o->out);
/* 128-bit FP result spans float registers f1 and f1+2. */
3501 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3503     /* ??? Specification exception: r1 must be < 14.  */
3504     int f1 = get_field(s->fields, r1);
3505     store_freg(f1, o->out);
3506     store_freg((f1 + 2) & 15, o->out2);
/* Conditional writes: skip the store when r1 == r2 (no-op move). */
3509 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3511     if (get_field(f, r1) != get_field(f, r2)) {
3512         store_reg32_i64(get_field(f, r1), o->out);
3516 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
3518     if (get_field(f, r1) != get_field(f, r2)) {
3519         store_freg32_i64(get_field(f, r1), o->out);
/* Stores of out to memory at addr1 (m1_*) or at in2 (m2_32). */
3523 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3525     tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
3528 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3530     tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
3533 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3535     tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
3538 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3540     tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
3543 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3545     tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
3548 /* ====================================================================== */
3549 /* The "INput 1" generators.  These load the first operand to an insn.  */
/* Variants: load_reg copies into a temp; *_o aliases the global (and
   presumably sets g_in1 on an elided line -- verify); *_32s/_32u/_sr32
   sign-extend, zero-extend, or take the high half; r1p1* use the odd
   register of an even/odd pair; la*/m1_* compute addresses and loads. */
3551 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3553     o->in1 = load_reg(get_field(f, r1));
3556 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3558     o->in1 = regs[get_field(f, r1)];
3562 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3564     o->in1 = tcg_temp_new_i64();
3565     tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
3568 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3570     o->in1 = tcg_temp_new_i64();
3571     tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
3574 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
3576     o->in1 = tcg_temp_new_i64();
3577     tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
3580 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
3582     /* ??? Specification exception: r1 must be even.  */
3583     int r1 = get_field(f, r1);
3584     o->in1 = load_reg((r1 + 1) & 15);
3587 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3589     /* ??? Specification exception: r1 must be even.  */
3590     int r1 = get_field(f, r1);
3591     o->in1 = tcg_temp_new_i64();
3592     tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
3595 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3597     /* ??? Specification exception: r1 must be even.  */
3598     int r1 = get_field(f, r1);
3599     o->in1 = tcg_temp_new_i64();
3600     tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
/* Concatenate the pair into one 64-bit value: r1 high, r1+1 low. */
3603 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3605     /* ??? Specification exception: r1 must be even.  */
3606     int r1 = get_field(f, r1);
3607     o->in1 = tcg_temp_new_i64();
3608     tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3611 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
3613     o->in1 = load_reg(get_field(f, r2));
3616 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
3618     o->in1 = load_reg(get_field(f, r3));
3621 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
3623     o->in1 = regs[get_field(f, r3)];
3627 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3629     o->in1 = tcg_temp_new_i64();
3630     tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
3633 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3635     o->in1 = tcg_temp_new_i64();
3636     tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
3639 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3641     o->in1 = load_freg32_i64(get_field(f, r1));
3644 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3646     o->in1 = fregs[get_field(f, r1)];
3650 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3652     /* ??? Specification exception: r1 must be < 14.  */
3653     int r1 = get_field(f, r1);
3655     o->out2 = fregs[(r1 + 2) & 15];
3656     o->g_out = o->g_out2 = true;
/* Effective-address generators: base+disp (la1) or base+index+disp (la2). */
3659 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
3661     o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
3664 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
3666     int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
3667     o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
/* Memory loads into in1 with the indicated size and extension. */
3670 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3673     o->in1 = tcg_temp_new_i64();
3674     tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
3677 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3680     o->in1 = tcg_temp_new_i64();
3681     tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
3684 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3687     o->in1 = tcg_temp_new_i64();
3688     tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
3691 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3694     o->in1 = tcg_temp_new_i64();
3695     tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
3698 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3701     o->in1 = tcg_temp_new_i64();
3702     tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
3705 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3708     o->in1 = tcg_temp_new_i64();
3709     tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
3712 /* ====================================================================== */
3713 /* The "INput 2" generators.  These load the second operand to an insn.  */
/* Same conventions as the in1 generators: *_o alias globals, *_8s..32u
   extend register fields, a2/ri2/ra2 build addresses, m2_*/mri2_* load
   from memory, and i2* materialize immediates as constants. */
3715 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3717     o->in2 = regs[get_field(f, r1)];
3721 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3723     o->in2 = tcg_temp_new_i64();
3724     tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
3727 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3729     o->in2 = tcg_temp_new_i64();
3730     tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
3733 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
3735     o->in2 = load_reg(get_field(f, r2));
3738 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
3740     o->in2 = regs[get_field(f, r2)];
/* Load r2 only when nonzero (the r2 == 0 branch is elided here). */
3744 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
3746     int r2 = get_field(f, r2);
3748         o->in2 = load_reg(r2);
3752 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
3754     o->in2 = tcg_temp_new_i64();
3755     tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
3758 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3760     o->in2 = tcg_temp_new_i64();
3761     tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
3764 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3766     o->in2 = tcg_temp_new_i64();
3767     tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
3770 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3772     o->in2 = tcg_temp_new_i64();
3773     tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
3776 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
3778     o->in2 = load_reg(get_field(f, r3));
3781 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3783     o->in2 = tcg_temp_new_i64();
3784     tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
3787 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3789     o->in2 = tcg_temp_new_i64();
3790     tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
3793 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
3795     o->in2 = load_freg32_i64(get_field(f, r2));
3798 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
3800     o->in2 = fregs[get_field(f, r2)];
3804 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
3806     /* ??? Specification exception: r1 must be < 14.  */
3807     int r2 = get_field(f, r2);
3809     o->in2 = fregs[(r2 + 2) & 15];
3810     o->g_in1 = o->g_in2 = true;
/* Register-indirect address (base only, no displacement). */
3813 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
3815     o->in2 = get_address(s, 0, get_field(f, r2), 0);
3818 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
3820     int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
3821     o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
/* PC-relative target: current pc + signed halfword offset i2. */
3824 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
3826     o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
/* Shift-count operands, masked to 31 or 63 bits respectively. */
3829 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
3831     help_l2_shift(s, f, o, 31);
3834 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
3836     help_l2_shift(s, f, o, 63);
/* m2_* reuse in2 as both address and destination of the load. */
3839 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3842     tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
3845 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3848     tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
3851 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3854     tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
3857 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3860     tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
3863 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3866     tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
3869 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
3872     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
/* mri2_* presumably load from a pc-relative address (setup elided). */
3875 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3878     tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
3881 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3884     tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
3887 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3890     tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
3893 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
3896     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
/* Immediate operands, with explicit zero-extension variants and
   shifted forms (shift amount taken from insn->data). */
3899 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
3901     o->in2 = tcg_const_i64(get_field(f, i2));
3904 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3906     o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
3909 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3911     o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
3914 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3916     o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
3919 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
3921     uint64_t i2 = (uint16_t)get_field(f, i2);
3922     o->in2 = tcg_const_i64(i2 << s->insn->data);
3925 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
3927     uint64_t i2 = (uint32_t)get_field(f, i2);
3928     o->in2 = tcg_const_i64(i2 << s->insn->data);
3931 /* ====================================================================== */
3933 /* Find opc within the table of insns.  This is formulated as a switch
3934    statement so that (1) we get compile-time notice of cut-paste errors
3935    for duplicated opcodes, and (2) the compiler generates the binary
3936    search tree, rather than us having to post-process the table.  */
/* The C/D macros expand insn-data.def three times: first as an enum of
   insn_<NAME> indices, then as DisasInsn initializers, then as switch
   cases mapping opcode -> &insn_info[...].  Several #undef D lines and
   initializer fields are elided from this listing. */
3938 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
3939     D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
3941 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
3943 enum DisasInsnEnum {
3944 #include "insn-data.def"
3948 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
3953     .help_in1 = in1_##I1,                                                   \
3954     .help_in2 = in2_##I2,                                                   \
3955     .help_prep = prep_##P,                                                  \
3956     .help_wout = wout_##W,                                                  \
3957     .help_cout = cout_##CC,                                                 \
3958     .help_op = op_##OP,                                                     \
3962 /* Allow 0 to be used for NULL in the table below.  */
3970 static const DisasInsn insn_info[] = {
3971 #include "insn-data.def"
3975 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
3976     case OPC: return &insn_info[insn_ ## NM];
3978 static const DisasInsn *lookup_opc(uint16_t opc)
3981 #include "insn-data.def"
3990 /* Extract a field from the insn.  The INSN should be left-aligned in
3991    the uint64_t so that we can more easily utilize the big-bit-endian
3992    definitions we extract from the Principals of Operation.  */
3994 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
     /* Zero-extract [beg, beg+size) from the left-aligned insn word. */
4002     /* Zero extract the field from the insn.  */
4003     r = (insn << f->beg) >> (64 - f->size);
4005     /* Sign-extend, or un-swap the field as necessary.  */
4007     case 0: /* unsigned */
4009     case 1: /* signed */
4010         assert(f->size <= 32);
4011         m = 1u << (f->size - 1);
4014     case 2: /* dl+dh split, signed 20 bit.  */
     /* Reassemble the split displacement: dh (high 8, signed) : dl (12). */
4015         r = ((int8_t)r << 12) | (r >> 8);
4021     /* Validate that the "compressed" encoding we selected above is valid.
4022        I.e. we havn't make two different original fields overlap.  */
4023     assert(((o->presentC >> f->indexC) & 1) == 0);
4024     o->presentC |= 1 << f->indexC;
4025     o->presentO |= 1 << f->indexO;
4027     o->c[f->indexC] = r;
4030 /* Lookup the insn at the current PC, extracting the operands into O and
4031    returning the info struct for the insn.  Returns NULL for invalid insn.  */
4033 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
     /* First halfword gives the major opcode, which determines the
        instruction length (2, 4 or 6 bytes). */
4036     uint64_t insn, pc = s->pc;
4038     const DisasInsn *info;
4040     insn = ld_code2(env, pc);
4041     op = (insn >> 8) & 0xff;
4042     ilen = get_ilen(op);
4043     s->next_pc = s->pc + ilen;
     /* Fetch the remaining halfwords, left-aligning insn in 64 bits. */
4050         insn = ld_code4(env, pc) << 32;
4053         insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
4059     /* We can't actually determine the insn format until we've looked up
4060        the full insn opcode.  Which we can't do without locating the
4061        secondary opcode.  Assume by default that OP2 is at bit 40; for
4062        those smaller insns that don't actually have a secondary opcode
4063        this will correctly result in OP2 = 0.  */
4069     case 0xb2: /* S, RRF, RRE */
4070     case 0xb3: /* RRE, RRD, RRF */
4071     case 0xb9: /* RRE, RRF */
4072     case 0xe5: /* SSE, SIL */
4073         op2 = (insn << 8) >> 56;
4077     case 0xc0: /* RIL */
4078     case 0xc2: /* RIL */
4079     case 0xc4: /* RIL */
4080     case 0xc6: /* RIL */
4081     case 0xc8: /* SSF */
4082     case 0xcc: /* RIL */
4083         op2 = (insn << 12) >> 60;
4085     case 0xd0 ... 0xdf: /* SS */
4091     case 0xee ... 0xf3: /* SS */
4092     case 0xf8 ... 0xfd: /* SS */
4096         op2 = (insn << 40) >> 56;
4100     memset(f, 0, sizeof(*f));
4104     /* Lookup the instruction.  */
4105     info = lookup_opc(op << 8 | op2);
4107     /* If we found it, extract the operands.  */
4109         DisasFormat fmt = info->fmt;
4112         for (i = 0; i < NUM_C_FIELD; ++i) {
4113             extract_field(f, &format_info[fmt].op[i], insn);
/* Translate a single instruction: decode it, run the five generator
   hooks (in1, in2, prep, op, wout, cout), then free any temporaries
   that are not aliases of TCG globals.  Falls back to the legacy
   interpreter for opcodes not in the new table. */
4119 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
4121     const DisasInsn *insn;
4122     ExitStatus ret = NO_EXIT;
4126     insn = extract_insn(env, s, &f);
4128     /* If not found, try the old interpreter.  This includes ILLOPC.  */
4130         disas_s390_insn(env, s);
4131         switch (s->is_jmp) {
4139             ret = EXIT_PC_UPDATED;
4142             ret = EXIT_NORETURN;
4152     /* Set up the strutures we use to communicate with the helpers.  */
4155     o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
4156     TCGV_UNUSED_I64(o.out);
4157     TCGV_UNUSED_I64(o.out2);
4158     TCGV_UNUSED_I64(o.in1);
4159     TCGV_UNUSED_I64(o.in2);
4160     TCGV_UNUSED_I64(o.addr1);
4162     /* Implement the instruction.  */
4163     if (insn->help_in1) {
4164         insn->help_in1(s, &f, &o);
4166     if (insn->help_in2) {
4167         insn->help_in2(s, &f, &o);
4169     if (insn->help_prep) {
4170         insn->help_prep(s, &f, &o);
4172     if (insn->help_op) {
4173         ret = insn->help_op(s, &o);
4175     if (insn->help_wout) {
4176         insn->help_wout(s, &f, &o);
4178     if (insn->help_cout) {
4179         insn->help_cout(s, &o);
4182     /* Free any temporaries created by the helpers.  */
4183     if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
4184         tcg_temp_free_i64(o.out);
4186     if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
4187         tcg_temp_free_i64(o.out2);
4189     if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
4190         tcg_temp_free_i64(o.in1);
4192     if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
4193         tcg_temp_free_i64(o.in2);
4195     if (!TCGV_IS_UNUSED_I64(o.addr1)) {
4196         tcg_temp_free_i64(o.addr1);
4199     /* Advance to the next instruction.  */
/* Main translation loop: translate instructions starting at the current
   psw address until a TB-ending status, a page boundary, the opcode
   buffer limit, max_insns, or single-step stops us.  With search_pc set
   (see the wrappers below) it also records per-op pc/cc_op metadata for
   restore_state_to_opc.  NOTE(review): many lines of this function are
   elided from this listing. */
4204 static inline void gen_intermediate_code_internal(CPUS390XState *env,
4205                                                   TranslationBlock *tb,
4209     target_ulong pc_start;
4210     uint64_t next_page_start;
4211     uint16_t *gen_opc_end;
4213     int num_insns, max_insns;
     /* Without the 64-bit addressing flag, addresses are 31-bit. */
4221     if (!(tb->flags & FLAG_MASK_64)) {
4222         pc_start &= 0x7fffffff;
4227     dc.cc_op = CC_OP_DYNAMIC;
4228     do_debug = dc.singlestep_enabled = env->singlestep_enabled;
4229     dc.is_jmp = DISAS_NEXT;
4231     gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4233     next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
4236     max_insns = tb->cflags & CF_COUNT_MASK;
4237     if (max_insns == 0) {
4238         max_insns = CF_COUNT_MASK;
     /* search_pc bookkeeping: pad skipped slots, then record pc/cc_op. */
4245             j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4249                     tcg_ctx.gen_opc_instr_start[lj++] = 0;
4252             tcg_ctx.gen_opc_pc[lj] = dc.pc;
4253             gen_opc_cc_op[lj] = dc.cc_op;
4254             tcg_ctx.gen_opc_instr_start[lj] = 1;
4255             tcg_ctx.gen_opc_icount[lj] = num_insns;
4257         if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
4261         if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4262             tcg_gen_debug_insn_start(dc.pc);
     /* Stop before an insn that has a breakpoint on it. */
4266         if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
4267             QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
4268                 if (bp->pc == dc.pc) {
4269                     status = EXIT_PC_STALE;
4275         if (status == NO_EXIT) {
4276             status = translate_one(env, &dc);
4279         /* If we reach a page boundary, are single stepping,
4280            or exhaust instruction count, stop generation.  */
4281         if (status == NO_EXIT
4282             && (dc.pc >= next_page_start
4283                 || tcg_ctx.gen_opc_ptr >= gen_opc_end
4284                 || num_insns >= max_insns
4286                 || env->singlestep_enabled)) {
4287             status = EXIT_PC_STALE;
4289     } while (status == NO_EXIT);
4291     if (tb->cflags & CF_LAST_IO) {
     /* TB epilogue: sync psw.addr / cc state depending on exit status. */
4300         update_psw_addr(&dc);
4302     case EXIT_PC_UPDATED:
4303         if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
4304             gen_op_calc_cc(&dc);
4306             /* Next TB starts off with CC_OP_DYNAMIC,
4307                so make sure the cc op type is in env */
4308             gen_op_set_cc_op(&dc);
4311             gen_exception(EXCP_DEBUG);
4313             /* Generate the return instruction */
4321     gen_icount_end(tb, num_insns);
4322     *tcg_ctx.gen_opc_ptr = INDEX_op_end;
4324         j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4327             tcg_ctx.gen_opc_instr_start[lj++] = 0;
4330         tb->size = dc.pc - pc_start;
4331         tb->icount = num_insns;
4334 #if defined(S390X_DEBUG_DISAS)
4335     if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
4336         qemu_log("IN: %s\n", lookup_symbol(pc_start));
4337         log_target_disas(env, pc_start, dc.pc - pc_start, 1);
/* Public entry points: translate a TB, without (0) or with (1) the
   search_pc bookkeeping used for restoring state after a fault. */
4343 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4345     gen_intermediate_code_internal(env, tb, 0);
4348 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4350     gen_intermediate_code_internal(env, tb, 1);
4353 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4356 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4357 cc_op = gen_opc_cc_op[pc_pos];
4358 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {