 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"

/* global register indexes */
static TCGv_env cpu_env;

#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    struct TranslationBlock *tb;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t ex_value;
    uint64_t pc, next_pc;
    uint32_t ilen;
    enum cc_op cc_op;
    bool singlestep_enabled;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
{
    if (!(s->tb->flags & FLAG_MASK_64)) {
        if (s->tb->flags & FLAG_MASK_32) {
            return pc | 0x80000000;
        }
    }
    return pc;
}
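
/* Illustrative example (not part of the original source): in 31-bit mode
   (FLAG_MASK_32 set, FLAG_MASK_64 clear) a return address of 0x00401000
   yields link info 0x80401000, i.e. the address with the addressing-mode
   bit set; in 64-bit mode the pc is used unchanged.  */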

void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    int i;

    if (env->cc_op > 3) {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
    } else {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
                    env->psw.mask, env->psw.addr, env->cc_op);
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
                    env->vregs[i][0].ll, env->vregs[i][1].ll);
        cpu_fprintf(f, (i % 2) ? "\n" : " ");
    }

#ifndef CONFIG_USER_ONLY
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
#endif

#ifdef DEBUG_INLINE_BRANCHES
    for (i = 0; i < CC_OP_MAX; i++) {
        cpu_fprintf(f, "  %15s = %10ld\t%10ld\n", cc_name(i),
                    inline_branch_miss[i], inline_branch_hit[i]);
    }
#endif

    cpu_fprintf(f, "\n");
}

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;
    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
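
/* Example (illustrative, not from the original source): if regs[1] holds
   0xdeadbeef00000000, then store_reg32_i64(1, v) with v = 0x12345678
   leaves regs[1] = 0xdeadbeef12345678 -- the deposit writes bits 0-31
   only, as required for 32-bit register updates.  */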

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    tcg_gen_movi_i64(psw_addr, s->pc);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->pc);

    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->pc);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->pc);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->pc);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        tcg_abort();
        break;
    }
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_trap(DisasContext *s)
{
    TCGv_i32 t;

    /* Set DXC to 0xff.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    gen_program_exception(s, PGM_DATA);
}

#ifndef CONFIG_USER_ONLY
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
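
/* Worked example (illustrative): with B2 = r2 holding 0x1000, X2 = r3
   holding 0x20 and D2 = -4, the emitted ops compute 0x1000 + 0x20 - 4 =
   0x101c.  When need_31 is set (24/31-bit mode) the result is additionally
   masked with 0x7fffffff, so address arithmetic wraps within 2G.  */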

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
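
/* Lifecycle sketch of the lazy-CC scheme (illustrative, not in the original
   source): an arithmetic op records its inputs and picks a cc_op instead of
   computing the condition code, e.g.

       gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);

   A later consumer either folds a branch mask directly into a TCG
   comparison on those saved values (see disas_jcc below) or forces the
   2-bit value into cc_op via gen_op_calc_cc.  */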

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64: case CC_OP_ADDU_64: case CC_OP_ADDC_64:
    case CC_OP_SUB_64: case CC_OP_SUBU_64: case CC_OP_SUBB_64:
    case CC_OP_ADD_32: case CC_OP_ADDU_32: case CC_OP_ADDC_32:
    case CC_OP_SUB_32: case CC_OP_SUBU_32: case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0: case CC_OP_CONST1: case CC_OP_CONST2:
    case CC_OP_CONST3: case CC_OP_STATIC: case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ: case CC_OP_ABS_64: case CC_OP_NABS_64:
    case CC_OP_ABS_32: case CC_OP_NABS_32: case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64: case CC_OP_COMP_32: case CC_OP_COMP_64:
    case CC_OP_NZ_F32: case CC_OP_NZ_F64: case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM: case CC_OP_LTGT_32: case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32: case CC_OP_TM_64: case CC_OP_SLA_32:
    case CC_OP_SLA_64: case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64: case CC_OP_ADDU_64: case CC_OP_ADDC_64:
    case CC_OP_SUB_64: case CC_OP_SUBU_64: case CC_OP_SUBB_64:
    case CC_OP_ADD_32: case CC_OP_ADDU_32: case CC_OP_ADDC_32:
    case CC_OP_SUB_32: case CC_OP_SUBU_32: case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

static bool use_exit_tb(DisasContext *s)
{
    return (s->singlestep_enabled ||
            (s->tb->cflags & CF_LAST_IO) ||
            (s->tb->flags & FLAG_MASK_PER));
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
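
/* Worked example (illustrative): after a signed compare (CC_OP_LTGT_64),
   BRC mask 8 selects only CC=0 and maps to TCG_COND_EQ, while mask 8|2
   (CC=0 or CC=2, i.e. equal-or-greater) maps to TCG_COND_GE.  Masks whose
   table entry is TCG_COND_NEVER cannot be expressed as one comparison and
   fall back to the dynamic path in disas_jcc.  */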

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as don't-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
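
/* Example of the general case (illustrative): mask 0x9 (CC=0 or CC=3) has
   no single-comparison mapping, so the generated code tests
   (8 >> cc) & 0x9 != 0: cc=0 gives 8&9 != 0 (taken), cc=3 gives 1&9 != 0
   (taken), while cc=1 and cc=2 give 4&9 == 0 and 2&9 == 0 (not taken).  */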

static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5

/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
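
/* Usage sketch (illustrative): an instruction whose operand must live in an
   even/odd register pair declares e.g. SPEC_r1_even in its table entry, and
   the decode loop raises a specification exception for an odd r1 before any
   operation helper runs.  Constraints that cannot be expressed this way are
   checked explicitly inside the op helpers (see op_clcl below).  */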

/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We have updated the PC and CC values.  */
    EXIT_PC_CC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_MIE,                /* miscellaneous-instruction-extensions */
    FAC_LAT,                /* load-and-trap */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
    FAC_ILA,                /* interlocked access facility 1 */
    FAC_LPP,                /* load-program-parameter */
    FAC_DAT_ENH,            /* DAT-enhancement */
    FAC_E2,                 /* extended-translation facility 2 */
} DisasFacility;

struct DisasInsn {
    unsigned opc:16;
    DisasFacility fac:8;
    unsigned spec:8;

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
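
/* Pipeline sketch (illustrative; the helper names are examples, not
   verified against the full insn table): a typical RR arithmetic entry
   wires help_in1/help_in2 to load r1 and r2, help_op to op_add, help_wout
   to write r1 back, and help_cout to record the CC op.  translate_one then
   runs in1 -> in2 -> prep -> op -> wout -> cout, so op_add itself only has
   to emit the addition.  */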

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        per_branch(s, true);
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}

static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
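
/* Illustration (not in the original source): for ALCGR the preceding
   addition left a cc_op in which CC=2 and CC=3 mean "carry".  Mask 3
   selects exactly those two CC values, so the setcond above materializes
   the carry as a 0/1 value that can simply be added into the result.  */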

static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
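
/* Worked example (illustrative): NIHH encodes s->insn->data with shift=48
   and size=16, giving mask = 0xffff << 48.  The immediate is shifted into
   place and all untouched bit positions are forced to 1 before the AND, so
   the other three halfwords of the register survive; CC is then derived
   from the 16 manipulated bits only.  */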

static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}

static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return NO_EXIT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}

static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}

static ExitStatus op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}

static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return NO_EXIT;
}

static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}

#ifndef CONFIG_USER_ONLY
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGMemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    check_privileged(s);
    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    set_cc_static(s);
    return NO_EXIT;
}
#endif

static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}

static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}

#ifndef CONFIG_USER_ONLY
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    update_psw_addr(s);
    gen_op_calc_cc(s);

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif

static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}

static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}

static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}

static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return EXIT_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return EXIT_PC_CC_UPDATED;
}

static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
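
/* Worked example (illustrative): FLOGR on 0x0000400000000800 finds the
   leftmost one bit 17 positions from the MSB, so R1 = 17 and R1+1 = the
   input with that bit cleared, 0x0000000000000800; CC is derived from the
   original input via CC_OP_FLOGR.  */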

static ExitStatus op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
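
/* Worked example (illustrative): ICM r1,0xc,addr has a contiguous mask, so
   it becomes a single 16-bit load deposited at pos = 16 (the two high
   bytes of the 32-bit field) with ccm = 0xffff0000.  A sparse mask such as
   0xa takes the byte-by-byte default path instead.  */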

static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}

static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
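
/* Bit-layout sketch (derived from the shift amounts above, not stated in
   the original source): the shl/shr pair moves the PSW program mask into
   bits 24-27 of the result, the computed CC lands in bits 28-29, and bits
   30-31 stay zero, so the byte written to R1 reads 00:cc:progmask from its
   most significant bit.  */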

#ifndef CONFIG_USER_ONLY
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    m4 = tcg_const_i32(get_field(s->fields, m4));
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}

static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif

static ExitStatus op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap.  */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap.  */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap.  */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap.  */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap.  */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2666 #ifndef CONFIG_USER_ONLY
2667 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2669 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2670 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2671 check_privileged(s);
2672 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2673 tcg_temp_free_i32(r1);
2674 tcg_temp_free_i32(r3);
2678 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2680 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2681 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2682 check_privileged(s);
2683 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2684 tcg_temp_free_i32(r1);
2685 tcg_temp_free_i32(r3);
2689 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2691 check_privileged(s);
2692 gen_helper_lra(o->out, cpu_env, o->in2);
2697 static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
2699 check_privileged(s);
2701 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2705 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2709 check_privileged(s);
2710 per_breaking_event(s);
2712 t1 = tcg_temp_new_i64();
2713 t2 = tcg_temp_new_i64();
2714 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2715 tcg_gen_addi_i64(o->in2, o->in2, 4);
2716 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2717 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2718 tcg_gen_shli_i64(t1, t1, 32);
2719 gen_helper_load_psw(cpu_env, t1, t2);
2720 tcg_temp_free_i64(t1);
2721 tcg_temp_free_i64(t2);
2722 return EXIT_NORETURN;
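/* Worked example of the short-PSW expansion above, assuming the operand
   holds a mask word of 0x000a0000 followed by an address word: t1
   becomes 0x000a000000000000 after the shift, so the 32-bit mask
   occupies the high half of the 64-bit mask passed to load_psw while t2
   supplies the address. */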
2725 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2729 check_privileged(s);
2730 per_breaking_event(s);
2732 t1 = tcg_temp_new_i64();
2733 t2 = tcg_temp_new_i64();
2734 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2735 tcg_gen_addi_i64(o->in2, o->in2, 8);
2736 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2737 gen_helper_load_psw(cpu_env, t1, t2);
2738 tcg_temp_free_i64(t1);
2739 tcg_temp_free_i64(t2);
2740 return EXIT_NORETURN;
2744 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2746 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2747 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2748 gen_helper_lam(cpu_env, r1, o->in2, r3);
2749 tcg_temp_free_i32(r1);
2750 tcg_temp_free_i32(r3);
2754 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2756 int r1 = get_field(s->fields, r1);
2757 int r3 = get_field(s->fields, r3);
2760 /* Only one register to read. */
2761 t1 = tcg_temp_new_i64();
2762 if (unlikely(r1 == r3)) {
2763 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2764 store_reg32_i64(r1, t1);
2769 /* First load the values of the first and last registers to trigger
2770 possible page faults. */
2771 t2 = tcg_temp_new_i64();
2772 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2773 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2774 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2775 store_reg32_i64(r1, t1);
2776 store_reg32_i64(r3, t2);
2778 /* Only two registers to read. */
2779 if (((r1 + 1) & 15) == r3) {
2785 /* Then load the remaining registers. A page fault can't occur. */
2787 tcg_gen_movi_i64(t2, 4);
2790 tcg_gen_add_i64(o->in2, o->in2, t2);
2791 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2792 store_reg32_i64(r1, t1);
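/* A concrete wraparound case for the first/last probe above: LM with
   r1 = 14, r3 = 1 covers registers 14, 15, 0, 1, so (r3 - r1) & 15 = 3
   and the second probe loads from o->in2 + 12. Faulting on the first or
   last word before any register has been written is what keeps the
   instruction restartable. */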
2800 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2802 int r1 = get_field(s->fields, r1);
2803 int r3 = get_field(s->fields, r3);
2806 /* Only one register to read. */
2807 t1 = tcg_temp_new_i64();
2808 if (unlikely(r1 == r3)) {
2809 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2810 store_reg32h_i64(r1, t1);
2815 /* First load the values of the first and last registers to trigger
2816 possible page faults. */
2817 t2 = tcg_temp_new_i64();
2818 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2819 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2820 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2821 store_reg32h_i64(r1, t1);
2822 store_reg32h_i64(r3, t2);
2824 /* Only two registers to read. */
2825 if (((r1 + 1) & 15) == r3) {
2831 /* Then load the remaining registers. A page fault can't occur. */
2833 tcg_gen_movi_i64(t2, 4);
2836 tcg_gen_add_i64(o->in2, o->in2, t2);
2837 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2838 store_reg32h_i64(r1, t1);
2846 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2848 int r1 = get_field(s->fields, r1);
2849 int r3 = get_field(s->fields, r3);
2852 /* Only one register to read. */
2853 if (unlikely(r1 == r3)) {
2854 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2858 /* First load the values of the first and last registers to trigger
2859 possible page faults. */
2860 t1 = tcg_temp_new_i64();
2861 t2 = tcg_temp_new_i64();
2862 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2863 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2864 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2865 tcg_gen_mov_i64(regs[r1], t1);
2868 /* Only two registers to read. */
2869 if (((r1 + 1) & 15) == r3) {
2874 /* Then load the remaining registers. A page fault can't occur. */
2876 tcg_gen_movi_i64(t1, 8);
2879 tcg_gen_add_i64(o->in2, o->in2, t1);
2880 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2887 static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
2890 TCGMemOp mop = s->insn->data;
2892 /* In a parallel context, stop the world and single-step. */
2893 if (parallel_cpus) {
2894 potential_page_fault(s);
2895 gen_exception(EXCP_ATOMIC);
2896 return EXIT_NORETURN;
2899 /* In a serial context, perform the two loads ... */
2900 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
2901 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
2902 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
2903 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
2904 tcg_temp_free_i64(a1);
2905 tcg_temp_free_i64(a2);
2907 /* ... and indicate that we performed them while interlocked. */
2908 gen_op_movi_cc(s, 0);
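/* Two regimes for LPD above: under parallel_cpus the block is abandoned
   in favour of the stop-the-world EXCP_ATOMIC path, where the loads can
   be replayed serially; otherwise no other vCPU can run between the two
   MO_ALIGN loads, which is what justifies reporting CC 0 ("fetched
   interlocked") afterwards. */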
2912 #ifndef CONFIG_USER_ONLY
2913 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2915 check_privileged(s);
2916 potential_page_fault(s);
2917 gen_helper_lura(o->out, cpu_env, o->in2);
2921 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2923 check_privileged(s);
2924 potential_page_fault(s);
2925 gen_helper_lurag(o->out, cpu_env, o->in2);
2930 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2933 o->g_out = o->g_in2;
2934 TCGV_UNUSED_I64(o->in2);
2939 static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
2941 int b2 = get_field(s->fields, b2);
2942 TCGv ar1 = tcg_temp_new_i64();
2945 o->g_out = o->g_in2;
2946 TCGV_UNUSED_I64(o->in2);
2949 switch (s->tb->flags & FLAG_MASK_ASC) {
2950 case PSW_ASC_PRIMARY >> 32:
2951 tcg_gen_movi_i64(ar1, 0);
2953 case PSW_ASC_ACCREG >> 32:
2954 tcg_gen_movi_i64(ar1, 1);
2956 case PSW_ASC_SECONDARY >> 32:
2958 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
2960 tcg_gen_movi_i64(ar1, 0);
2963 case PSW_ASC_HOME >> 32:
2964 tcg_gen_movi_i64(ar1, 2);
2968 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
2969 tcg_temp_free_i64(ar1);
2974 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2978 o->g_out = o->g_in1;
2979 o->g_out2 = o->g_in2;
2980 TCGV_UNUSED_I64(o->in1);
2981 TCGV_UNUSED_I64(o->in2);
2982 o->g_in1 = o->g_in2 = false;
2986 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2988 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2989 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2990 tcg_temp_free_i32(l);
2994 static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
2996 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2997 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
2998 tcg_temp_free_i32(l);
3002 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
3004 int r1 = get_field(s->fields, r1);
3005 int r2 = get_field(s->fields, r2);
3008 /* r1 and r2 must be even. */
3009 if (r1 & 1 || r2 & 1) {
3010 gen_program_exception(s, PGM_SPECIFICATION);
3011 return EXIT_NORETURN;
3014 t1 = tcg_const_i32(r1);
3015 t2 = tcg_const_i32(r2);
3016 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3017 tcg_temp_free_i32(t1);
3018 tcg_temp_free_i32(t2);
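/* Example of the parity check above: MVCL with r1 = 3 or r2 = 5 raises
   a specification exception before the helper runs, since each operand
   must name the even register of an even/odd pair (the even register
   carries the address, the odd one the length). */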
3023 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
3025 int r1 = get_field(s->fields, r1);
3026 int r3 = get_field(s->fields, r3);
3029 /* r1 and r3 must be even. */
3030 if (r1 & 1 || r3 & 1) {
3031 gen_program_exception(s, PGM_SPECIFICATION);
3032 return EXIT_NORETURN;
3035 t1 = tcg_const_i32(r1);
3036 t3 = tcg_const_i32(r3);
3037 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3038 tcg_temp_free_i32(t1);
3039 tcg_temp_free_i32(t3);
3044 static ExitStatus op_mvclu(DisasContext *s, DisasOps *o)
3046 int r1 = get_field(s->fields, r1);
3047 int r3 = get_field(s->fields, r3);
3050 /* r1 and r3 must be even. */
3051 if (r1 & 1 || r3 & 1) {
3052 gen_program_exception(s, PGM_SPECIFICATION);
3053 return EXIT_NORETURN;
3056 t1 = tcg_const_i32(r1);
3057 t3 = tcg_const_i32(r3);
3058 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3059 tcg_temp_free_i32(t1);
3060 tcg_temp_free_i32(t3);
3065 #ifndef CONFIG_USER_ONLY
3066 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
3068 int r1 = get_field(s->fields, l1);
3069 check_privileged(s);
3070 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3075 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
3077 int r1 = get_field(s->fields, l1);
3078 check_privileged(s);
3079 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3085 static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
3087 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3088 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3089 tcg_temp_free_i32(l);
3093 static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
3095 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3096 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3097 tcg_temp_free_i32(l);
3101 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
3103 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3108 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
3110 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3112 return_low128(o->in2);
3116 static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
3118 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3119 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3120 tcg_temp_free_i32(l);
3124 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
3126 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3130 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
3132 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3136 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
3138 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3142 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
3144 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3148 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
3150 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3154 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
3156 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3157 return_low128(o->out2);
3161 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
3163 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3164 return_low128(o->out2);
3168 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
3170 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3171 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3172 tcg_temp_free_i64(r3);
3176 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
3178 int r3 = get_field(s->fields, r3);
3179 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3183 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
3185 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3186 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3187 tcg_temp_free_i64(r3);
3191 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
3193 int r3 = get_field(s->fields, r3);
3194 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3198 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3201 z = tcg_const_i64(0);
3202 n = tcg_temp_new_i64();
3203 tcg_gen_neg_i64(n, o->in2);
3204 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3205 tcg_temp_free_i64(n);
3206 tcg_temp_free_i64(z);
3210 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
3212 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3216 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
3218 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3222 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
3224 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3225 tcg_gen_mov_i64(o->out2, o->in2);
3229 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
3231 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3232 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3233 tcg_temp_free_i32(l);
3238 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3240 tcg_gen_neg_i64(o->out, o->in2);
3244 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3246 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3250 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3252 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3256 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3258 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3259 tcg_gen_mov_i64(o->out2, o->in2);
3263 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3265 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3266 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3267 tcg_temp_free_i32(l);
3272 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3274 tcg_gen_or_i64(o->out, o->in1, o->in2);
3278 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3280 int shift = s->insn->data & 0xff;
3281 int size = s->insn->data >> 8;
3282 uint64_t mask = ((1ull << size) - 1) << shift;
3285 tcg_gen_shli_i64(o->in2, o->in2, shift);
3286 tcg_gen_or_i64(o->out, o->in1, o->in2);
3288 /* Produce the CC from only the bits manipulated. */
3289 tcg_gen_andi_i64(cc_dst, o->out, mask);
3290 set_cc_nz_u64(s, cc_dst);
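/* Worked example of the encoded immediate above, assuming a table entry
   with s->insn->data = (16 << 8) | 48: size = 16 and shift = 48 give
   mask = 0xffff000000000000ull, the 16-bit immediate is ORed into the
   leftmost halfword of the register, and the CC reflects only those 16
   bits. */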
3294 static ExitStatus op_pack(DisasContext *s, DisasOps *o)
3296 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3297 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3298 tcg_temp_free_i32(l);
3302 static ExitStatus op_pka(DisasContext *s, DisasOps *o)
3304 int l2 = get_field(s->fields, l2) + 1;
3307 /* The length must not exceed 32 bytes. */
3309 gen_program_exception(s, PGM_SPECIFICATION);
3310 return EXIT_NORETURN;
3312 l = tcg_const_i32(l2);
3313 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3314 tcg_temp_free_i32(l);
3318 static ExitStatus op_pku(DisasContext *s, DisasOps *o)
3320 int l2 = get_field(s->fields, l2) + 1;
3323 /* The length must be even and must not exceed 64 bytes. */
3324 if ((l2 & 1) || (l2 > 64)) {
3325 gen_program_exception(s, PGM_SPECIFICATION);
3326 return EXIT_NORETURN;
3328 l = tcg_const_i32(l2);
3329 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3330 tcg_temp_free_i32(l);
3334 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3336 gen_helper_popcnt(o->out, o->in2);
3340 #ifndef CONFIG_USER_ONLY
3341 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3343 check_privileged(s);
3344 gen_helper_ptlb(cpu_env);
3349 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3351 int i3 = get_field(s->fields, i3);
3352 int i4 = get_field(s->fields, i4);
3353 int i5 = get_field(s->fields, i5);
3354 int do_zero = i4 & 0x80;
3355 uint64_t mask, imask, pmask;
3358 /* Adjust the arguments for the specific insn. */
3359 switch (s->fields->op2) {
3360 case 0x55: /* risbg */
3365 case 0x5d: /* risbhg */
3368 pmask = 0xffffffff00000000ull;
3370 case 0x51: /* risblg */
3373 pmask = 0x00000000ffffffffull;
3379 /* MASK is the set of bits to be inserted from R2.
3380 Take care of I3/I4 wraparound. */
3383 mask ^= pmask >> i4 >> 1;
3385 mask |= ~(pmask >> i4 >> 1);
3389 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3390 insns, we need to keep the other half of the register. */
3391 imask = ~mask | ~pmask;
3393 if (s->fields->op2 == 0x55) {
3403 if (s->fields->op2 == 0x5d) {
3407 /* In some cases we can implement this with extract. */
3408 if (imask == 0 && pos == 0 && len > 0 && rot + len <= 64) {
3409 tcg_gen_extract_i64(o->out, o->in2, rot, len);
3413 /* In some cases we can implement this with deposit. */
3414 if (len > 0 && (imask == 0 || ~mask == imask)) {
3415 /* Note that we rotate the bits to be inserted to the lsb, not to
3416 the position as described in the PoO. */
3417 rot = (rot - pos) & 63;
3422 /* Rotate the input as necessary. */
3423 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3425 /* Insert the selected bits into the output. */
3428 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3430 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3432 } else if (imask == 0) {
3433 tcg_gen_andi_i64(o->out, o->in2, mask);
3435 tcg_gen_andi_i64(o->in2, o->in2, mask);
3436 tcg_gen_andi_i64(o->out, o->out, imask);
3437 tcg_gen_or_i64(o->out, o->out, o->in2);
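/* Worked RISBG mask, assuming the elided setup starts from
   mask = pmask >> i3 and derives len, pos and rot from i3/i4/i5: for
   risbg (pmask = ~0ull) with i3 = 48, i4 = 63, i5 = 0 the xor term is
   zero, so mask = 0x000000000000ffff, imask = ~mask keeps the high 48
   bits of R1, rot = 0 makes the rotation a no-op, and the deposit fast
   path applies because ~mask == imask. */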
3442 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3444 int i3 = get_field(s->fields, i3);
3445 int i4 = get_field(s->fields, i4);
3446 int i5 = get_field(s->fields, i5);
3449 /* If this is a test-only form, arrange to discard the result. */
3451 o->out = tcg_temp_new_i64();
3459 /* MASK is the set of bits to be operated on from R2.
3460 Take care of I3/I4 wraparound. */
3463 mask ^= ~0ull >> i4 >> 1;
3465 mask |= ~(~0ull >> i4 >> 1);
3468 /* Rotate the input as necessary. */
3469 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3472 switch (s->fields->op2) {
3473 case 0x55: /* AND */
3474 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3475 tcg_gen_and_i64(o->out, o->out, o->in2);
3478 tcg_gen_andi_i64(o->in2, o->in2, mask);
3479 tcg_gen_or_i64(o->out, o->out, o->in2);
3481 case 0x57: /* XOR */
3482 tcg_gen_andi_i64(o->in2, o->in2, mask);
3483 tcg_gen_xor_i64(o->out, o->out, o->in2);
3490 tcg_gen_andi_i64(cc_dst, o->out, mask);
3491 set_cc_nz_u64(s, cc_dst);
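/* How the three RxSBG forms above differ only in the filler for the
   unselected bits: the AND form forces the bits outside MASK to 1 in
   the rotated input so they leave R1 unchanged, while the OR and XOR
   forms force them to 0; in every case the CC is then computed from
   the selected bits alone. */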
3495 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3497 tcg_gen_bswap16_i64(o->out, o->in2);
3501 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3503 tcg_gen_bswap32_i64(o->out, o->in2);
3507 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3509 tcg_gen_bswap64_i64(o->out, o->in2);
3513 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3515 TCGv_i32 t1 = tcg_temp_new_i32();
3516 TCGv_i32 t2 = tcg_temp_new_i32();
3517 TCGv_i32 to = tcg_temp_new_i32();
3518 tcg_gen_extrl_i64_i32(t1, o->in1);
3519 tcg_gen_extrl_i64_i32(t2, o->in2);
3520 tcg_gen_rotl_i32(to, t1, t2);
3521 tcg_gen_extu_i32_i64(o->out, to);
3522 tcg_temp_free_i32(t1);
3523 tcg_temp_free_i32(t2);
3524 tcg_temp_free_i32(to);
3528 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3530 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3534 #ifndef CONFIG_USER_ONLY
3535 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3537 check_privileged(s);
3538 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3543 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3545 check_privileged(s);
3546 gen_helper_sacf(cpu_env, o->in2);
3547 /* The address space control has changed, so end the block. */
3548 return EXIT_PC_STALE;
3552 static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3554 int sam = s->insn->data;
3570 /* Bizarre but true, we check the address of the current insn for the
3571 specification exception, not the next to be executed. Thus the PoO
3572 documents that Bad Things Happen two bytes before the end. */
3573 if (s->pc & ~mask) {
3574 gen_program_exception(s, PGM_SPECIFICATION);
3575 return EXIT_NORETURN;
3579 tsam = tcg_const_i64(sam);
3580 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3581 tcg_temp_free_i64(tsam);
3583 /* Always exit the TB, since we (may have) changed execution mode. */
3584 return EXIT_PC_STALE;
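/* How the deposit above encodes the mode, assuming SAM24, SAM31 and
   SAM64 supply sam = 0, 1 and 3: the two bits land at offsets 31 and 32
   from the LSB of psw_mask, i.e. the BA/EA addressing-mode pair, so
   sam = 3 selects 64-bit mode, sam = 1 selects 31-bit and sam = 0
   selects 24-bit. */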
3587 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3589 int r1 = get_field(s->fields, r1);
3590 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3594 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3596 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3600 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3602 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3606 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3608 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3609 return_low128(o->out2);
3613 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3615 gen_helper_sqeb(o->out, cpu_env, o->in2);
3619 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3621 gen_helper_sqdb(o->out, cpu_env, o->in2);
3625 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3627 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3628 return_low128(o->out2);
3632 #ifndef CONFIG_USER_ONLY
3633 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3635 check_privileged(s);
3636 potential_page_fault(s);
3637 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3642 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3644 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3645 check_privileged(s);
3646 potential_page_fault(s);
3647 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
3649 tcg_temp_free_i32(r1);
3654 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3661 disas_jcc(s, &c, get_field(s->fields, m3));
3663 /* We want to store when the condition is fulfilled, so branch
3664 out when it's not. */
3665 c.cond = tcg_invert_cond(c.cond);
3667 lab = gen_new_label();
3669 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3671 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3675 r1 = get_field(s->fields, r1);
3676 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3677 if (s->insn->data) {
3678 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3680 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3682 tcg_temp_free_i64(a);
3688 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3690 uint64_t sign = 1ull << s->insn->data;
3691 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3692 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3693 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3694 /* The arithmetic left shift is curious in that it does not affect
3695 the sign bit. Copy that over from the source unchanged. */
3696 tcg_gen_andi_i64(o->out, o->out, ~sign);
3697 tcg_gen_andi_i64(o->in1, o->in1, sign);
3698 tcg_gen_or_i64(o->out, o->out, o->in1);
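/* Worked example of the sign-preserving shift above, for the 32-bit
   form (s->insn->data = 31, sign = 0x80000000): shifting 0x80000001
   left by one produces 0x00000002 in the low word once the sign bit is
   masked off, and re-inserting the source sign yields 0x80000002, so
   SLA keeps the sign while CC_OP_SLA_32 accounts for the overflow. */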
3702 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3704 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3708 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3710 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3714 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3716 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3720 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3722 gen_helper_sfpc(cpu_env, o->in2);
3726 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3728 gen_helper_sfas(cpu_env, o->in2);
3732 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3734 int b2 = get_field(s->fields, b2);
3735 int d2 = get_field(s->fields, d2);
3736 TCGv_i64 t1 = tcg_temp_new_i64();
3737 TCGv_i64 t2 = tcg_temp_new_i64();
3740 switch (s->fields->op2) {
3741 case 0x99: /* SRNM */
3744 case 0xb8: /* SRNMB */
3747 case 0xb9: /* SRNMT */
3753 mask = (1 << len) - 1;
3755 /* Insert the value into the appropriate field of the FPC. */
3757 tcg_gen_movi_i64(t1, d2 & mask);
3759 tcg_gen_addi_i64(t1, regs[b2], d2);
3760 tcg_gen_andi_i64(t1, t1, mask);
3762 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3763 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3764 tcg_temp_free_i64(t1);
3766 /* Then install the new FPC to set the rounding mode in fpu_status. */
3767 gen_helper_sfpc(cpu_env, t2);
3768 tcg_temp_free_i64(t2);
3772 #ifndef CONFIG_USER_ONLY
3773 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3775 check_privileged(s);
3776 tcg_gen_shri_i64(o->in2, o->in2, 4);
3777 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
3781 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3783 check_privileged(s);
3784 gen_helper_sske(cpu_env, o->in1, o->in2);
3788 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3790 check_privileged(s);
3791 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3795 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3797 check_privileged(s);
3798 /* ??? Surely cpu address != cpu number. In any case the previous
3799 version of this stored more than the required half-word, so it
3800 is unlikely this has ever been tested. */
3801 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3805 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3807 gen_helper_stck(o->out, cpu_env);
3808 /* ??? We don't implement clock states. */
3809 gen_op_movi_cc(s, 0);
3813 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3815 TCGv_i64 c1 = tcg_temp_new_i64();
3816 TCGv_i64 c2 = tcg_temp_new_i64();
3817 gen_helper_stck(c1, cpu_env);
3818 /* Shift the 64-bit value into its place as a zero-extended
3819 104-bit value. Note that "bit positions 64-103 are always
3820 non-zero so that they compare differently to STCK"; we set
3821 the least significant bit to 1. */
3822 tcg_gen_shli_i64(c2, c1, 56);
3823 tcg_gen_shri_i64(c1, c1, 8);
3824 tcg_gen_ori_i64(c2, c2, 0x10000);
3825 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3826 tcg_gen_addi_i64(o->in2, o->in2, 8);
3827 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3828 tcg_temp_free_i64(c1);
3829 tcg_temp_free_i64(c2);
3830 /* ??? We don't implement clock states. */
3831 gen_op_movi_cc(s, 0);
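/* Memory picture for the sketch above, writing C for the value returned
   by stck: the first doubleword stored is C >> 8 and the second is
   (C << 56) | 0x10000, so the 64-bit clock sits one byte to the right
   within the 16-byte extended format and the ORed bit guarantees the
   tail compares non-zero, as the quoted PoO note requires. */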
3835 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3837 check_privileged(s);
3838 gen_helper_sckc(cpu_env, o->in2);
3842 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3844 check_privileged(s);
3845 gen_helper_stckc(o->out, cpu_env);
3849 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3851 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3852 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3853 check_privileged(s);
3854 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3855 tcg_temp_free_i32(r1);
3856 tcg_temp_free_i32(r3);
3860 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3862 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3863 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3864 check_privileged(s);
3865 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3866 tcg_temp_free_i32(r1);
3867 tcg_temp_free_i32(r3);
3871 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3873 TCGv_i64 t1 = tcg_temp_new_i64();
3875 check_privileged(s);
3876 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3877 tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
3878 tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
3879 tcg_temp_free_i64(t1);
3884 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3886 check_privileged(s);
3887 gen_helper_spt(cpu_env, o->in2);
3891 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3893 check_privileged(s);
3894 gen_helper_stfl(cpu_env);
3898 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3900 check_privileged(s);
3901 gen_helper_stpt(o->out, cpu_env);
3905 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3907 check_privileged(s);
3908 potential_page_fault(s);
3909 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3914 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3916 check_privileged(s);
3917 gen_helper_spx(cpu_env, o->in2);
3921 static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
3923 check_privileged(s);
3924 potential_page_fault(s);
3925 gen_helper_xsch(cpu_env, regs[1]);
3930 static ExitStatus op_csch(DisasContext *s, DisasOps *o)
3932 check_privileged(s);
3933 potential_page_fault(s);
3934 gen_helper_csch(cpu_env, regs[1]);
3939 static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
3941 check_privileged(s);
3942 potential_page_fault(s);
3943 gen_helper_hsch(cpu_env, regs[1]);
3948 static ExitStatus op_msch(DisasContext *s, DisasOps *o)
3950 check_privileged(s);
3951 potential_page_fault(s);
3952 gen_helper_msch(cpu_env, regs[1], o->in2);
3957 static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
3959 check_privileged(s);
3960 potential_page_fault(s);
3961 gen_helper_rchp(cpu_env, regs[1]);
3966 static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
3968 check_privileged(s);
3969 potential_page_fault(s);
3970 gen_helper_rsch(cpu_env, regs[1]);
3975 static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
3977 check_privileged(s);
3978 potential_page_fault(s);
3979 gen_helper_ssch(cpu_env, regs[1], o->in2);
3984 static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
3986 check_privileged(s);
3987 potential_page_fault(s);
3988 gen_helper_stsch(cpu_env, regs[1], o->in2);
3993 static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
3995 check_privileged(s);
3996 potential_page_fault(s);
3997 gen_helper_tsch(cpu_env, regs[1], o->in2);
4002 static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
4004 check_privileged(s);
4005 potential_page_fault(s);
4006 gen_helper_chsc(cpu_env, o->in2);
4011 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
4013 check_privileged(s);
4014 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4015 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4019 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
4021 uint64_t i2 = get_field(s->fields, i2);
4024 check_privileged(s);
4026 /* It is important to do what the instruction name says: STORE THEN.
4027 If we let the output hook perform the store, a fault and restart
4028 would leave the wrong SYSTEM MASK in place. */
4029 t = tcg_temp_new_i64();
4030 tcg_gen_shri_i64(t, psw_mask, 56);
4031 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4032 tcg_temp_free_i64(t);
4034 if (s->fields->op == 0xac) {
4035 tcg_gen_andi_i64(psw_mask, psw_mask,
4036 (i2 << 56) | 0x00ffffffffffffffull);
4038 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
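/* Example of the two tails above, assuming op 0xac is STNSM and the
   other case is STOSM: STNSM with i2 = 0xfc ANDs 0xfc into PSW byte 0,
   clearing the I/O and external interrupt masks, while STOSM ORs
   i2 << 56 to set bits instead; either way the old system mask byte has
   already been stored by the code above. */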
4043 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
4045 check_privileged(s);
4046 potential_page_fault(s);
4047 gen_helper_stura(cpu_env, o->in2, o->in1);
4051 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
4053 check_privileged(s);
4054 potential_page_fault(s);
4055 gen_helper_sturg(cpu_env, o->in2, o->in1);
4060 static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
4062 potential_page_fault(s);
4063 gen_helper_stfle(cc_op, cpu_env, o->in2);
4068 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
4070 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4074 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
4076 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4080 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
4082 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4086 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
4088 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4092 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
4094 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4095 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4096 gen_helper_stam(cpu_env, r1, o->in2, r3);
4097 tcg_temp_free_i32(r1);
4098 tcg_temp_free_i32(r3);
4102 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
4104 int m3 = get_field(s->fields, m3);
4105 int pos, base = s->insn->data;
4106 TCGv_i64 tmp = tcg_temp_new_i64();
4108 pos = base + ctz32(m3) * 8;
4111 /* Effectively a 32-bit store. */
4112 tcg_gen_shri_i64(tmp, o->in1, pos);
4113 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4119 /* Effectively a 16-bit store. */
4120 tcg_gen_shri_i64(tmp, o->in1, pos);
4121 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4128 /* Effectively an 8-bit store. */
4129 tcg_gen_shri_i64(tmp, o->in1, pos);
4130 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4134 /* This is going to be a sequence of shifts and stores. */
4135 pos = base + 32 - 8;
4138 tcg_gen_shri_i64(tmp, o->in1, pos);
4139 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4140 tcg_gen_addi_i64(o->in2, o->in2, 1);
4142 m3 = (m3 << 1) & 0xf;
4147 tcg_temp_free_i64(tmp);
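/* Mask-to-position examples for the dispatch above, taking base = 0:
   m3 = 0xf gives pos = 0 and a single 32-bit store; m3 = 0x6 gives
   pos = 8 and a 16-bit store of bits 23:8; a sparse mask such as 0x5
   takes the final loop, which stores one byte per set m3 bit, working
   from the most significant selected byte downwards. */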
4151 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
4153 int r1 = get_field(s->fields, r1);
4154 int r3 = get_field(s->fields, r3);
4155 int size = s->insn->data;
4156 TCGv_i64 tsize = tcg_const_i64(size);
4160 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4162 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4167 tcg_gen_add_i64(o->in2, o->in2, tsize);
4171 tcg_temp_free_i64(tsize);
4175 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
4177 int r1 = get_field(s->fields, r1);
4178 int r3 = get_field(s->fields, r3);
4179 TCGv_i64 t = tcg_temp_new_i64();
4180 TCGv_i64 t4 = tcg_const_i64(4);
4181 TCGv_i64 t32 = tcg_const_i64(32);
4184 tcg_gen_shl_i64(t, regs[r1], t32);
4185 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4189 tcg_gen_add_i64(o->in2, o->in2, t4);
4193 tcg_temp_free_i64(t);
4194 tcg_temp_free_i64(t4);
4195 tcg_temp_free_i64(t32);
4199 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
4201 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
4203 return_low128(o->in2);
4207 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
4209 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4213 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
4218 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4220 /* The !borrow flag is the msb of CC. Since we want the inverse of
4221 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4222 disas_jcc(s, &cmp, 8 | 4);
4223 borrow = tcg_temp_new_i64();
4225 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4227 TCGv_i32 t = tcg_temp_new_i32();
4228 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4229 tcg_gen_extu_i32_i64(borrow, t);
4230 tcg_temp_free_i32(t);
4234 tcg_gen_sub_i64(o->out, o->out, borrow);
4235 tcg_temp_free_i64(borrow);
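/* Borrow recovery above: since the machine encodes "no borrow" in the
   msb of the CC, asking disas_jcc for CC 0 or 1 (mask 8 | 4) yields 1
   exactly when a borrow is outstanding, and that 0/1 value is simply
   subtracted from the difference; e.g. 5 - 3 with a pending borrow
   computes 5 - 3 - 1 = 1. */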
4239 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
4246 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4247 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4248 tcg_temp_free_i32(t);
4250 t = tcg_const_i32(s->ilen);
4251 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4252 tcg_temp_free_i32(t);
4254 gen_exception(EXCP_SVC);
4255 return EXIT_NORETURN;
4258 static ExitStatus op_tam(DisasContext *s, DisasOps *o)
4262 cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
4263 cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
4264 gen_op_movi_cc(s, cc);
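/* Resulting CC for the flag probe above: 24-bit mode yields cc = 0,
   31-bit mode cc = 1, and 64-bit mode cc = 3 (assuming the 64-bit flag
   is always set together with the 32-bit flag), matching the documented
   TEST ADDRESSING MODE condition codes. */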
4268 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
4270 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4275 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
4277 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4282 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
4284 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4289 #ifndef CONFIG_USER_ONLY
4291 static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
4293 check_privileged(s);
4294 gen_helper_testblock(cc_op, cpu_env, o->in2);
4299 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
4301 gen_helper_tprot(cc_op, o->addr1, o->in2);
4308 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
4310 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4311 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4312 tcg_temp_free_i32(l);
4317 static ExitStatus op_tre(DisasContext *s, DisasOps *o)
4319 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4320 return_low128(o->out2);
4325 static ExitStatus op_trt(DisasContext *s, DisasOps *o)
4327 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4328 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4329 tcg_temp_free_i32(l);
4334 static ExitStatus op_ts(DisasContext *s, DisasOps *o)
4336 TCGv_i32 t1 = tcg_const_i32(0xff);
4337 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4338 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4339 tcg_temp_free_i32(t1);
4344 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
4346 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4347 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4348 tcg_temp_free_i32(l);
4352 static ExitStatus op_unpka(DisasContext *s, DisasOps *o)
4354 int l1 = get_field(s->fields, l1) + 1;
4357 /* The length must not exceed 32 bytes. */
4359 gen_program_exception(s, PGM_SPECIFICATION);
4360 return EXIT_NORETURN;
4362 l = tcg_const_i32(l1);
4363 gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4364 tcg_temp_free_i32(l);
4369 static ExitStatus op_unpku(DisasContext *s, DisasOps *o)
4371 int l1 = get_field(s->fields, l1) + 1;
4374 /* The length must be even and must not exceed 64 bytes. */
4375 if ((l1 & 1) || (l1 > 64)) {
4376 gen_program_exception(s, PGM_SPECIFICATION);
4377 return EXIT_NORETURN;
4379 l = tcg_const_i32(l1);
4380 gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4381 tcg_temp_free_i32(l);
4387 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
4389 int d1 = get_field(s->fields, d1);
4390 int d2 = get_field(s->fields, d2);
4391 int b1 = get_field(s->fields, b1);
4392 int b2 = get_field(s->fields, b2);
4393 int l = get_field(s->fields, l1);
4396 o->addr1 = get_address(s, 0, b1, d1);
4398 /* If the addresses are identical, this is a store/memset of zero. */
4399 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4400 o->in2 = tcg_const_i64(0);
4404 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4407 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4411 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4414 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4418 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4421 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4425 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4427 gen_op_movi_cc(s, 0);
4431 /* But in general we'll defer to a helper. */
4432 o->in2 = get_address(s, 0, b2, d2);
4433 t32 = tcg_const_i32(l);
4434 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4435 tcg_temp_free_i32(t32);
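/* Fast-path example above: XC of an operand with itself (b1 == b2 and
   d1 == d2) with length code l <= 31 is turned into inline zero stores,
   so an 8-byte field is cleared with a single st64 and CC set to 0, the
   classic XC zeroing idiom; any other combination funnels into the xc
   helper with the raw length code. */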
4440 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4442 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4446 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4448 int shift = s->insn->data & 0xff;
4449 int size = s->insn->data >> 8;
4450 uint64_t mask = ((1ull << size) - 1) << shift;
4453 tcg_gen_shli_i64(o->in2, o->in2, shift);
4454 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4456 /* Produce the CC from only the bits manipulated. */
4457 tcg_gen_andi_i64(cc_dst, o->out, mask);
4458 set_cc_nz_u64(s, cc_dst);
4462 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4464 o->out = tcg_const_i64(0);
4468 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4470 o->out = tcg_const_i64(0);
4476 /* ====================================================================== */
4477 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4478 the original inputs), update the various cc data structures in order to
4479 be able to compute the new condition code. */
4481 static void cout_abs32(DisasContext *s, DisasOps *o)
4483 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4486 static void cout_abs64(DisasContext *s, DisasOps *o)
4488 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4491 static void cout_adds32(DisasContext *s, DisasOps *o)
4493 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4496 static void cout_adds64(DisasContext *s, DisasOps *o)
4498 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4501 static void cout_addu32(DisasContext *s, DisasOps *o)
4503 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4506 static void cout_addu64(DisasContext *s, DisasOps *o)
4508 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4511 static void cout_addc32(DisasContext *s, DisasOps *o)
4513 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4516 static void cout_addc64(DisasContext *s, DisasOps *o)
4518 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4521 static void cout_cmps32(DisasContext *s, DisasOps *o)
4523 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4526 static void cout_cmps64(DisasContext *s, DisasOps *o)
4528 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4531 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4533 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4536 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4538 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4541 static void cout_f32(DisasContext *s, DisasOps *o)
4543 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4546 static void cout_f64(DisasContext *s, DisasOps *o)
4548 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4551 static void cout_f128(DisasContext *s, DisasOps *o)
4553 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4556 static void cout_nabs32(DisasContext *s, DisasOps *o)
4558 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4561 static void cout_nabs64(DisasContext *s, DisasOps *o)
4563 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4566 static void cout_neg32(DisasContext *s, DisasOps *o)
4568 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4571 static void cout_neg64(DisasContext *s, DisasOps *o)
4573 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4576 static void cout_nz32(DisasContext *s, DisasOps *o)
4578 tcg_gen_ext32u_i64(cc_dst, o->out);
4579 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4582 static void cout_nz64(DisasContext *s, DisasOps *o)
4584 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4587 static void cout_s32(DisasContext *s, DisasOps *o)
4589 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
4592 static void cout_s64(DisasContext *s, DisasOps *o)
4594 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
4597 static void cout_subs32(DisasContext *s, DisasOps *o)
4599 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
4602 static void cout_subs64(DisasContext *s, DisasOps *o)
4604 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
4607 static void cout_subu32(DisasContext *s, DisasOps *o)
4609 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
4612 static void cout_subu64(DisasContext *s, DisasOps *o)
4614 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
4617 static void cout_subb32(DisasContext *s, DisasOps *o)
4619 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
4622 static void cout_subb64(DisasContext *s, DisasOps *o)
4624 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4627 static void cout_tm32(DisasContext *s, DisasOps *o)
4629 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4632 static void cout_tm64(DisasContext *s, DisasOps *o)
4634 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4637 /* ====================================================================== */
4638 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4639 with the TCG register to which we will write. Used in combination with
4640 the "wout" generators: in some cases we need a new temporary, and in
4641 some cases we can write to a TCG global. */
4643 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4645 o->out = tcg_temp_new_i64();
4647 #define SPEC_prep_new 0
4649 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4651 o->out = tcg_temp_new_i64();
4652 o->out2 = tcg_temp_new_i64();
4654 #define SPEC_prep_new_P 0
4656 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4658 o->out = regs[get_field(f, r1)];
4661 #define SPEC_prep_r1 0
4663 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
4665 int r1 = get_field(f, r1);
4667 o->out2 = regs[r1 + 1];
4668 o->g_out = o->g_out2 = true;
4670 #define SPEC_prep_r1_P SPEC_r1_even
4672 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4674 o->out = fregs[get_field(f, r1)];
4677 #define SPEC_prep_f1 0
4679 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4681 int r1 = get_field(f, r1);
4683 o->out2 = fregs[r1 + 2];
4684 o->g_out = o->g_out2 = true;
4686 #define SPEC_prep_x1 SPEC_r1_f128
4688 /* ====================================================================== */
4689 /* The "Write OUTput" generators. These generally perform some non-trivial
4690 copy of data to TCG globals, or to main memory. The trivial cases are
4691 generally handled by having a "prep" generator install the TCG global
4692 as the destination of the operation. */
4694 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4696 store_reg(get_field(f, r1), o->out);
4698 #define SPEC_wout_r1 0
4700 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4702 int r1 = get_field(f, r1);
4703 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
4705 #define SPEC_wout_r1_8 0
4707 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4709 int r1 = get_field(f, r1);
4710 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
4712 #define SPEC_wout_r1_16 0
4714 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4716 store_reg32_i64(get_field(f, r1), o->out);
4718 #define SPEC_wout_r1_32 0
4720 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
4722 store_reg32h_i64(get_field(f, r1), o->out);
4724 #define SPEC_wout_r1_32h 0
4726 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4728 int r1 = get_field(f, r1);
4729 store_reg32_i64(r1, o->out);
4730 store_reg32_i64(r1 + 1, o->out2);
4732 #define SPEC_wout_r1_P32 SPEC_r1_even
4734 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4736 int r1 = get_field(f, r1);
4737 store_reg32_i64(r1 + 1, o->out);
4738 tcg_gen_shri_i64(o->out, o->out, 32);
4739 store_reg32_i64(r1, o->out);
4741 #define SPEC_wout_r1_D32 SPEC_r1_even
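/* Pair-split example for wout_r1_D32 above: a 64-bit result
   0xaaaaaaaabbbbbbbb leaves 0xbbbbbbbb in the odd register r1 + 1 and,
   after the shift, 0xaaaaaaaa in the even register r1, the usual
   even/odd destination convention for 32-bit multiply and divide. */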
4743 static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4745 int r3 = get_field(f, r3);
4746 store_reg32_i64(r3, o->out);
4747 store_reg32_i64(r3 + 1, o->out2);
4749 #define SPEC_wout_r3_P32 SPEC_r3_even
4751 static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
4753 int r3 = get_field(f, r3);
4754 store_reg(r3, o->out);
4755 store_reg(r3 + 1, o->out2);
4757 #define SPEC_wout_r3_P64 SPEC_r3_even
4759 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4761 store_freg32_i64(get_field(f, r1), o->out);
4763 #define SPEC_wout_e1 0
4765 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4767 store_freg(get_field(f, r1), o->out);
4769 #define SPEC_wout_f1 0
4771 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4773 int f1 = get_field(s->fields, r1);
4774 store_freg(f1, o->out);
4775 store_freg(f1 + 2, o->out2);
4777 #define SPEC_wout_x1 SPEC_r1_f128
4779 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4781 if (get_field(f, r1) != get_field(f, r2)) {
4782 store_reg32_i64(get_field(f, r1), o->out);
4785 #define SPEC_wout_cond_r1r2_32 0
4787 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4789 if (get_field(f, r1) != get_field(f, r2)) {
4790 store_freg32_i64(get_field(f, r1), o->out);
4793 #define SPEC_wout_cond_e1e2 0
4795 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4797 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4799 #define SPEC_wout_m1_8 0
4801 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4803 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4805 #define SPEC_wout_m1_16 0
4807 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4809 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4811 #define SPEC_wout_m1_32 0
4813 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4815 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4817 #define SPEC_wout_m1_64 0
4819 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4821 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4823 #define SPEC_wout_m2_32 0
4825 static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4827 store_reg(get_field(f, r1), o->in2);
4829 #define SPEC_wout_in2_r1 0
4831 static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4833 store_reg32_i64(get_field(f, r1), o->in2);
4835 #define SPEC_wout_in2_r1_32 0
4837 /* ====================================================================== */
4838 /* The "INput 1" generators. These load the first operand to an insn. */
4840 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4842 o->in1 = load_reg(get_field(f, r1));
4844 #define SPEC_in1_r1 0
4846 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4848 o->in1 = regs[get_field(f, r1)];
4851 #define SPEC_in1_r1_o 0
4853 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4855 o->in1 = tcg_temp_new_i64();
4856 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
4858 #define SPEC_in1_r1_32s 0
4860 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4862 o->in1 = tcg_temp_new_i64();
4863 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4865 #define SPEC_in1_r1_32u 0
4867 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4869 o->in1 = tcg_temp_new_i64();
4870 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4872 #define SPEC_in1_r1_sr32 0
4874 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4876 o->in1 = load_reg(get_field(f, r1) + 1);
4878 #define SPEC_in1_r1p1 SPEC_r1_even
4880 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4882 o->in1 = tcg_temp_new_i64();
4883 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4885 #define SPEC_in1_r1p1_32s SPEC_r1_even
4887 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4889 o->in1 = tcg_temp_new_i64();
4890 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4892 #define SPEC_in1_r1p1_32u SPEC_r1_even
4894 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4896 int r1 = get_field(f, r1);
4897 o->in1 = tcg_temp_new_i64();
4898 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4900 #define SPEC_in1_r1_D32 SPEC_r1_even
4902 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4904 o->in1 = load_reg(get_field(f, r2));
4906 #define SPEC_in1_r2 0
4908 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4910 o->in1 = tcg_temp_new_i64();
4911 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
4913 #define SPEC_in1_r2_sr32 0
4915 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4917 o->in1 = load_reg(get_field(f, r3));
4919 #define SPEC_in1_r3 0
4921 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4923 o->in1 = regs[get_field(f, r3)];
4926 #define SPEC_in1_r3_o 0
4928 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4930 o->in1 = tcg_temp_new_i64();
4931 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4933 #define SPEC_in1_r3_32s 0
4935 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4937 o->in1 = tcg_temp_new_i64();
4938 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4940 #define SPEC_in1_r3_32u 0
4942 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4944 int r3 = get_field(f, r3);
4945 o->in1 = tcg_temp_new_i64();
4946 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4948 #define SPEC_in1_r3_D32 SPEC_r3_even
4950 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4952 o->in1 = load_freg32_i64(get_field(f, r1));
4954 #define SPEC_in1_e1 0
4956 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4958 o->in1 = fregs[get_field(f, r1)];
4961 #define SPEC_in1_f1_o 0
4963 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4965 int r1 = get_field(f, r1);
4967 o->out2 = fregs[r1 + 2];
4968 o->g_out = o->g_out2 = true;
4970 #define SPEC_in1_x1_o SPEC_r1_f128
4972 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4974 o->in1 = fregs[get_field(f, r3)];
4977 #define SPEC_in1_f3_o 0
4979 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4981 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4983 #define SPEC_in1_la1 0
4985 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4987 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4988 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4990 #define SPEC_in1_la2 0
4992 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4995 o->in1 = tcg_temp_new_i64();
4996 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4998 #define SPEC_in1_m1_8u 0
5000 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5003 o->in1 = tcg_temp_new_i64();
5004 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5006 #define SPEC_in1_m1_16s 0
5008 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5011 o->in1 = tcg_temp_new_i64();
5012 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5014 #define SPEC_in1_m1_16u 0
5016 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5019 o->in1 = tcg_temp_new_i64();
5020 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5022 #define SPEC_in1_m1_32s 0
5024 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5027 o->in1 = tcg_temp_new_i64();
5028 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5030 #define SPEC_in1_m1_32u 0
5032 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5035 o->in1 = tcg_temp_new_i64();
5036 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5038 #define SPEC_in1_m1_64 0
5040 /* ====================================================================== */
5041 /* The "INput 2" generators. These load the second operand to an insn. */
5043 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5045 o->in2 = regs[get_field(f, r1)];
5048 #define SPEC_in2_r1_o 0
5050 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5052 o->in2 = tcg_temp_new_i64();
5053 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
5055 #define SPEC_in2_r1_16u 0
5057 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5059 o->in2 = tcg_temp_new_i64();
5060 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
5062 #define SPEC_in2_r1_32u 0
5064 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5066 int r1 = get_field(f, r1);
5067 o->in2 = tcg_temp_new_i64();
5068 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5070 #define SPEC_in2_r1_D32 SPEC_r1_even
5072 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5074 o->in2 = load_reg(get_field(f, r2));
5076 #define SPEC_in2_r2 0
5078 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5080 o->in2 = regs[get_field(f, r2)];
5083 #define SPEC_in2_r2_o 0
5085 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
5087 int r2 = get_field(f, r2);
5089 o->in2 = load_reg(r2);
5092 #define SPEC_in2_r2_nz 0
5094 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
5096 o->in2 = tcg_temp_new_i64();
5097 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
5099 #define SPEC_in2_r2_8s 0
5101 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5103 o->in2 = tcg_temp_new_i64();
5104 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
5106 #define SPEC_in2_r2_8u 0
5108 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5110 o->in2 = tcg_temp_new_i64();
5111 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
5113 #define SPEC_in2_r2_16s 0
5115 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5117 o->in2 = tcg_temp_new_i64();
5118 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
5120 #define SPEC_in2_r2_16u 0
5122 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5124 o->in2 = load_reg(get_field(f, r3));
5126 #define SPEC_in2_r3 0
5128 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5130 o->in2 = tcg_temp_new_i64();
5131 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
5133 #define SPEC_in2_r3_sr32 0
5135 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5137 o->in2 = tcg_temp_new_i64();
5138 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
5140 #define SPEC_in2_r2_32s 0
5142 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5144 o->in2 = tcg_temp_new_i64();
5145 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
5147 #define SPEC_in2_r2_32u 0
5149 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5151 o->in2 = tcg_temp_new_i64();
5152 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
5154 #define SPEC_in2_r2_sr32 0
5156 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
5158 o->in2 = load_freg32_i64(get_field(f, r2));
5160 #define SPEC_in2_e2 0
5162 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5164 o->in2 = fregs[get_field(f, r2)];
5167 #define SPEC_in2_f2_o 0
5169 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5171 int r2 = get_field(f, r2);
5173 o->in2 = fregs[r2 + 2];
5174 o->g_in1 = o->g_in2 = true;
5176 #define SPEC_in2_x2_o SPEC_r2_f128
5178 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
5180 o->in2 = get_address(s, 0, get_field(f, r2), 0);
5182 #define SPEC_in2_ra2 0
5184 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
5186 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5187 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5189 #define SPEC_in2_a2 0
5191 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
5193 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
5195 #define SPEC_in2_ri2 0
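/* Relative-operand example for in2_ri2 above: with s->pc = 0x1000 and
   i2 = -4 the operand address is 0x1000 + (-4) * 2 = 0xff8; the factor
   of two is the halfword scaling shared by all relative-addressing
   instruction formats. */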
5197 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
5199 help_l2_shift(s, f, o, 31);
5201 #define SPEC_in2_sh32 0
5203 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
5205 help_l2_shift(s, f, o, 63);
5207 #define SPEC_in2_sh64 0
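
/* For in2_ri2, the i2 field counts halfwords, hence the "* 2" when
   forming the PC-relative address.  in2_sh32/in2_sh64 bound the shift
   amount to 31 or 63 bits via help_l2_shift's final mask argument.  */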
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0
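
/* All of the m2/mri2 loaders above share one pattern: the address is
   computed into o->in2 (via in2_a2 or in2_ri2) and is then overwritten
   in place by the loaded value, reusing the same temporary.  */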
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
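
/* The *_shl variants position a zero-extended immediate within the
   64-bit operand; the shift count comes from the .data field of the
   instruction's table entry (s->insn->data).  */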
#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-and-paste
   errors for duplicated opcodes, and (2) the compiler generates the
   binary search tree, rather than us having to post-process the table.  */
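
/* For example, assuming insn-data.def contains an entry like
       C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
   the first expansion below contributes "insn_AR" to DisasInsnEnum,
   the second builds its DisasInsn descriptor in insn_info[], and the
   third expands to "case 0x1a00: return &insn_info[insn_AR];".  */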
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                      \
    .opc = OPC,                                                             \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
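
/* As a worked example of the dl+dh case: a 20-bit displacement with
   dl = 0x001 and dh = 0xff is extracted above as r = dl:dh = 0x001ff;
   the rotate-and-sign-extend then yields dh:dl = 0xfffff001, i.e. -4095.
   (Hypothetical field values, shown only to illustrate the un-swap.)  */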
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
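        /* Per the Principles of Operation, the instruction length is
           encoded in the two high bits of the first opcode byte:
           00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes.  get_ilen
           implements that mapping.  */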
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->next_pc = s->pc + ilen;
    s->ilen = ilen;
    /* We can't actually determine the insn format until we've looked up
       the full insn opcode, which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }
    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }
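
    /* When PER (Program Event Recording) is enabled, every instruction
       fetch must be reported so the helper can raise the corresponding
       PER event; see gen_helper_per_check_exception below.  */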
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->pc);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif
    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }
    /* Set up the structures we use to communicate with the helpers.  */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);
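
    /* The actual work happens in up to six optional phases, each taken
       from the insn table entry: in1/in2 load the operands, prep readies
       the outputs, op performs the operation, wout writes results back
       to the architectural state, and cout folds the result into the
       condition code.  */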
    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }
    /* Free any temporaries created by the helpers.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Save off cc.  */
        update_cc_op(s);

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    int num_insns, max_insns;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    dc.ex_value = tb->cs_base;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    do {
        tcg_gen_insn_start(dc.pc, dc.cc_op);
        num_insns++;
        if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
            status = EXIT_PC_STALE;
            do_debug = true;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing.  */
            dc.pc += 2;
            break;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        status = translate_one(env, &dc);
        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || cs->singlestep_enabled
                || dc.ex_value)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);
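
    /* The loop ends with a status describing why translation stopped;
       the switch below then picks the cheapest exit path: a direct-jump
       goto_tb (EXIT_GOTO_TB), a plain exit_tb back to the main loop, or
       an in-line TB lookup via tcg_gen_lookup_and_goto_ptr.  */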
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* FALLTHRU */
    case EXIT_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(&dc)) {
            tcg_gen_exit_tb(0);
        } else {
            tcg_gen_lookup_and_goto_ptr(psw_addr);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    tb->size = dc.pc - pc_start;
    tb->icount = num_insns;
#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        if (unlikely(dc.ex_value)) {
            /* ??? Unfortunately log_target_disas can't use host memory.  */
            qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
        } else {
            qemu_log("IN: %s\n", lookup_symbol(pc_start));
            log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
            qemu_log("\n");
        }
        qemu_log_unlock();
    }
#endif
}
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];
    env->psw.addr = data[0];
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}