 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
#include "qemu/osdep.h"

#include "disas/disas.h"
#include "exec/exec-all.h"

#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"

/* global register indexes */
static TCGv_env cpu_env;

#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"

/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

    struct TranslationBlock *tb;
    const DisasInsn *insn;
    bool singlestep_enabled;

/* Information carried about a condition to be evaluated.  */
    struct { TCGv_i64 a, b; } s64;
    struct { TCGv_i32 a, b; } s32;
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];

static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
    if (!(s->tb->flags & FLAG_MASK_64)) {
        if (s->tb->flags & FLAG_MASK_32) {
            return pc | 0x80000000;
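/* Illustrative note (not in the original source): in 31-bit addressing
   mode the link information is the return address with the high bit of
   the 32-bit word set, e.g. pc_to_link_info(s, 0x00001234) yields
   0x80001234 when FLAG_MASK_32 is set and FLAG_MASK_64 is clear.  */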
void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    if (env->cc_op > 3) {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));

        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
                    env->psw.mask, env->psw.addr, env->cc_op);

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
            cpu_fprintf(f, "\n");

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
            cpu_fprintf(f, "\n");

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
                    env->vregs[i][0].ll, env->vregs[i][1].ll);
        cpu_fprintf(f, (i % 2) ? "\n" : " ");

#ifndef CONFIG_USER_ONLY
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
            cpu_fprintf(f, "\n");

#ifdef DEBUG_INLINE_BRANCHES
    for (i = 0; i < CC_OP_MAX; i++) {
        cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
                    inline_branch_miss[i], inline_branch_hit[i]);

    cpu_fprintf(f, "\n");
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

void s390x_translate_init(void)
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;
    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, vregs[i][0].d),
                                     cpu_reg_names[i + 16]);
static TCGv_i64 load_reg(int reg)
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);

static TCGv_i64 load_freg32_i64(int reg)
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);

static void store_reg(int reg, TCGv_i64 v)
    tcg_gen_mov_i64(regs[reg], v);

static void store_freg(int reg, TCGv_i64 v)
    tcg_gen_mov_i64(fregs[reg], v);

static void store_reg32_i64(int reg, TCGv_i64 v)
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
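/* Illustrative note (not in the original source): the deposit above is
   equivalent to
       regs[reg] = (regs[reg] & ~0xffffffffull) | (uint64_t)(uint32_t)v;
   i.e. only bits 0..31 are replaced, which is exactly the "32 bit
   register writes keep the upper half" behaviour named in the comment;
   store_reg32h_i64 below does the mirror-image deposit at bits 32..63.  */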
static void store_reg32h_i64(int reg, TCGv_i64 v)
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);

static void store_freg32_i64(int reg, TCGv_i64 v)
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);

static void return_low128(TCGv_i64 dest)
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));

static void update_psw_addr(DisasContext *s)
    tcg_gen_movi_i64(psw_addr, s->pc);

static void per_branch(DisasContext *s, bool to_next)
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->pc);

    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
            tcg_temp_free_i64(next_pc);

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->pc);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        TCGv_i64 pc = tcg_const_i64(s->pc);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);

static void per_breaking_event(DisasContext *s)
    tcg_gen_movi_i64(gbea, s->pc);

static void update_cc_op(DisasContext *s)
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);

static void potential_page_fault(DisasContext *s)

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
    return (uint64_t)cpu_lduw_code(env, pc);

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);

static int get_mem_index(DisasContext *s)
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
    case PSW_ASC_SECONDARY >> 32:
    case PSW_ASC_HOME >> 32:

static void gen_exception(int excp)
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
static void gen_program_exception(DisasContext *s, int code)
    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
static inline void gen_illegal_opcode(DisasContext *s)
    gen_program_exception(s, PGM_OPERATION);

static inline void gen_trap(DisasContext *s)
    /* Set DXC to 0xff.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    gen_program_exception(s, PGM_DATA);

#ifndef CONFIG_USER_ONLY
static void check_privileged(DisasContext *s)
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);

        tcg_gen_addi_i64(tmp, regs[b2], d2);

        tcg_gen_addi_i64(tmp, regs[x2], d2);

        tcg_gen_movi_i64(tmp, d2);

        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
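/* Illustrative note (not in the original source): the final andi
   implements 31-bit address arithmetic (the need_31 case).  E.g. with
   a base register of 0x7ffffffc and d2 = 8, the 64-bit sum is
   0x80000004, which the 0x7fffffff mask wraps to 0x00000004, as
   31-bit mode requires.  */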
static inline bool live_cc_data(DisasContext *s)
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_CONST0 + val;

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    tcg_gen_mov_i64(cc_dst, dst);

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    s->cc_op = CC_OP_STATIC;

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
    TCGv_i32 local_cc_op;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);

        dummy = tcg_const_i64(0);

        local_cc_op = tcg_const_i32(s->cc_op);

        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);

        /* env->cc_op already is the cc value */

        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);

        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);

        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);

    /* We now have cc in cc_op as constant */
609 /* We now have cc in cc_op as constant */
613 static bool use_exit_tb(DisasContext *s)
615 return (s->singlestep_enabled ||
616 (s->tb->cflags & CF_LAST_IO) ||
617 (s->tb->flags & FLAG_MASK_PER));
620 static bool use_goto_tb(DisasContext *s, uint64_t dest)
622 if (unlikely(use_exit_tb(s))) {
625 #ifndef CONFIG_USER_ONLY
626 return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
627 (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
633 static void account_noninline_branch(DisasContext *s, int cc_op)
635 #ifdef DEBUG_INLINE_BRANCHES
636 inline_branch_miss[cc_op]++;
640 static void account_inline_branch(DisasContext *s, int cc_op)
642 #ifdef DEBUG_INLINE_BRANCHES
643 inline_branch_hit[cc_op]++;
647 /* Table of mask values to comparison codes, given a comparison as input.
648 For such, CC=3 should not be possible. */
649 static const TCGCond ltgt_cond[16] = {
650 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
651 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
652 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
653 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
654 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
655 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
656 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
657 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
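/* Illustrative note (not in the original source): the table is indexed
   by the 4-bit branch mask, whose bits select CC0 (8), CC1 (4), CC2 (2)
   and CC3 (1).  After a signed compare, CC0 = equal, CC1 = low and
   CC2 = high.  So mask 8|2 ("equal or high") indexes entry 10,
   TCG_COND_GE, while mask 4 ("low") indexes entry 4, TCG_COND_LT.  */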
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);

        c->g1 = c->g2 = true;

    /* Find the TCG condition for the mask + cc op.  */

        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
        account_inline_branch(s, old_cc_op);

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
        account_inline_branch(s, old_cc_op);

        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
        account_inline_branch(s, old_cc_op);

        account_inline_branch(s, old_cc_op);

        account_inline_branch(s, old_cc_op);

        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
        case 2: /* src != 0 -> one bit found */
        account_inline_branch(s, old_cc_op);

        case 8 | 2: /* vr == 0 */
        case 4 | 1: /* vr != 0 */
        case 8 | 4: /* no carry -> vr >= src */
        case 2 | 1: /* carry -> vr < src */
        account_inline_branch(s, old_cc_op);
        /* Note that CC=0 is impossible; treat it as don't-care.  */
        case 2: /* zero -> op1 == op2 */
        case 4 | 1: /* !zero -> op1 != op2 */
        case 4: /* borrow (!carry) -> op1 < op2 */
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
        account_inline_branch(s, old_cc_op);

    /* Calculate cc value.  */

        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
    /* Load up the arguments of the comparison.  */
    c->g1 = c->g2 = false;

        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);

    case CC_OP_LTUGTU_32:
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);

        c->u.s64.b = tcg_const_i64(0);

    case CC_OP_LTUGTU_64:
        c->g1 = c->g2 = true;

        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);

        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);

            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);

        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);

        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            c->u.s32.b = tcg_const_i32(3);

        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            c->u.s32.b = tcg_const_i32(2);

        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            c->u.s32.b = tcg_const_i32(1);
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);

        case 0x8 | 0x4: /* cc < 2 */
            c->u.s32.b = tcg_const_i32(2);

        case 0x8: /* cc == 0 */
            c->u.s32.b = tcg_const_i32(0);

        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            c->u.s32.b = tcg_const_i32(0);

        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);

        case 0x4: /* cc == 1 */
            c->u.s32.b = tcg_const_i32(1);

        case 0x2 | 0x1: /* cc > 1 */
            c->u.s32.b = tcg_const_i32(1);

        case 0x2: /* cc == 2 */
            c->u.s32.b = tcg_const_i32(2);

        case 0x1: /* cc == 3 */
            c->u.s32.b = tcg_const_i32(3);

            /* CC is masked by something else: (8 >> cc) & mask.  */
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
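/* Illustrative note (not in the original source): in the general case,
   "(8 >> cc) & mask" is nonzero exactly when the mask bit for the
   current CC value is set.  E.g. with cc == 2, 8 >> 2 == 2, so the
   condition holds iff bit 0x2 (the CC2 bit) is set in the mask.  */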
static void free_compare(DisasCompare *c)
        tcg_temp_free_i64(c->u.s64.a);

        tcg_temp_free_i32(c->u.s32.a);

        tcg_temp_free_i64(c->u.s64.b);

        tcg_temp_free_i32(c->u.s32.b);

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                     FMT_##N,
#define F1(N, X1)                 F0(N)
#define F2(N, X1, X2)             F0(N)
#define F3(N, X1, X2, X3)         F0(N)
#define F4(N, X1, X2, X3, X4)     F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

#include "insn-format.def"

/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {

enum DisasFieldIndexC {

struct DisasFields {
    unsigned presentC:16;
    unsigned int presentO;

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
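/* Illustrative note (not in the original source): for an instruction
   with an r1 field, get_field(s->fields, r1) expands to
   get_field1(s->fields, FLD_O_r1, FLD_C_r1), which first checks the
   presentO availability bitmap (via the assert in get_field1) and then
   reads the compact storage slot FLD_C_r1.  */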
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
    return (f->presentO >> c) & 1;

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
    assert(have_field1(f, o));

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
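/* Illustrative note (not in the original source): each initializer is
   { bit offset, size, type, compact index, original index }.  BXD(2),
   for instance, describes the classic base-index-displacement operand:
   b2 at bit 16 (4 bits), x2 at bit 12 (4 bits) and d2 at bit 20
   (12 bits); BDL/BXDL instead use a 20-bit long displacement with a
   different field type (2 rather than 0).  */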
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
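/* Illustrative note (not in the original source): these are bit flags,
   so a helper needing both an even r1 and an even r2 can specify
   SPEC_r1_even | SPEC_r2_even.  A violated constraint surfaces as
   PGM_SPECIFICATION, just like the explicit "r1 & 1" checks in
   op_clcl/op_clcle below.  */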
/* Return values from translate_one, indicating the state of the TB.  */
    /* Continue the TB.  */
    /* We have emitted one or more goto_tb.  No fixup required.  */
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    /* We have updated the PC and CC values.  */
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_MIE,                /* miscellaneous-instruction-extensions */
    FAC_LAT,                /* load-and-trap */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
    FAC_ILA,                /* interlocked access facility 1 */
    FAC_LPP,                /* load-program-parameter */
    FAC_DAT_ENH,            /* DAT-enhancement */
    DisasFacility fac:8;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

        o->in2 = tcg_const_i64(d2 & mask);

        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
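/* Illustrative note (not in the original source): a 64-bit shift passes
   mask == 63 and a 32-bit shift mask == 31.  When no base register is
   given (apparently the b2 == 0 branch here), the amount is simply the
   constant d2 & mask; otherwise the usual base+displacement address is
   computed and masked, matching the architecture's use of the low
   address bits as the shift count.  */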
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
    if (dest == s->next_pc) {
        per_branch(s, true);

    if (use_goto_tb(s, dest)) {
        per_breaking_event(s);

        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;

        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return EXIT_PC_UPDATED;

static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
    uint64_t dest = s->pc + 2 * imm;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {

    if (dest == s->next_pc) {
        /* Branch to next.  */
        per_branch(s, true);

    if (c->cond == TCG_COND_ALWAYS) {
        ret = help_goto_direct(s, dest);

    if (TCGV_IS_UNUSED_I64(cdest)) {
        /* E.g. bcr %r0 -> no branch.  */

    if (c->cond == TCG_COND_ALWAYS) {
        tcg_gen_mov_i64(psw_addr, cdest);
        per_branch(s, false);
        ret = EXIT_PC_UPDATED;

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */

            lab = gen_new_label();

                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);

                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);

            /* Branch not taken.  */
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            per_breaking_event(s);

            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */

                tcg_gen_mov_i64(psw_addr, cdest);

            lab = gen_new_label();

                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);

                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);

            /* Branch not taken.  */

            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            tcg_gen_movi_i64(psw_addr, dest);

            per_breaking_event(s);
            ret = EXIT_PC_UPDATED;

        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);

            cdest = tcg_const_i64(dest);

            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);

            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);

            tcg_temp_free_i64(cdest);

        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static ExitStatus op_abs(DisasContext *s, DisasOps *o)
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);

static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);

static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);

static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);

static ExitStatus op_add(DisasContext *s, DisasOps *o)
    tcg_gen_add_i64(o->out, o->in1, o->in2);

static ExitStatus op_addc(DisasContext *s, DisasOps *o)
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();

        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);

        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
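/* Illustrative note (not in the original source): after a logical add,
   CC0/CC1 mean "no carry" and CC2/CC3 mean "carry", so branch mask 3
   (the CC2 bit 0x2 plus the CC3 bit 0x1) selects exactly the carry
   cases, and the setcond on that condition computes the carry bit.  */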
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);

static ExitStatus op_adb(DisasContext *s, DisasOps *o)
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);

static ExitStatus op_axb(DisasContext *s, DisasOps *o)
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);

static ExitStatus op_and(DisasContext *s, DisasOps *o)
    tcg_gen_and_i64(o->out, o->in1, o->in2);

static ExitStatus op_andi(DisasContext *s, DisasOps *o)
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
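/* Illustrative note (not in the original source): with, say, shift = 48
   and size = 16 (an AND immediate targeting the high halfword), mask is
   0xffff000000000000.  The immediate is shifted into position and every
   bit outside the mask is forced to 1 via ~mask, so the AND leaves the
   rest of the register untouched; the CC then reflects only the masked
   bits.  */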
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;

static ExitStatus op_basi(DisasContext *s, DisasOps *o)
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));

static ExitStatus op_bc(DisasContext *s, DisasOps *o)
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {

            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);

        /* Perform serialization */
        /* FIXME: perform checkpoint-synchronisation */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);

static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;

    c.cond = TCG_COND_NE;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);

static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);

    c.cond = TCG_COND_NE;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);

static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;

    c.cond = TCG_COND_NE;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);

static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);

static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);

        c.u.s64.b = regs[r3 | 1];

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];

    return help_branch(s, &c, is_imm, imm, o->in2);

static ExitStatus op_cj(DisasContext *s, DisasOps *o)
    int imm, m3 = get_field(s->fields, m3);

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    c.is_64 = c.g1 = c.g2 = true;

    is_imm = have_field(s->fields, i4);
        imm = get_field(s->fields, i4);

        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));

    return help_branch(s, &c, is_imm, imm, o->out);
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);

static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);

static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);

static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);

static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);

static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);

static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);

static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);

static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);

static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);

static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);

static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);

static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);

static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);

static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);

static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);

static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);

static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);

static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);

static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);

static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);

static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
    int l = get_field(s->fields, l1);

        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));

        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));

        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));

        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));

        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);

    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);

static ExitStatus op_clcl(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);

static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);

static ExitStatus op_clm(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);

static ExitStatus op_clst(DisasContext *s, DisasOps *o)
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    return_low128(o->in2);

static ExitStatus op_cps(DisasContext *s, DisasOps *o)
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);

static ExitStatus op_cs(DisasContext *s, DisasOps *o)
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
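/* Illustrative note (not in the original source): COMPARE AND SWAP sets
   CC 0 when the comparands were equal (the store was performed) and
   CC 1 when they were not, so the 0/1 result of the TCG_COND_NE setcond
   above is the architected CC value itself.  */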
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

#ifndef CONFIG_USER_ONLY
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
    TCGMemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    check_privileged(s);
    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);

        tcg_gen_mov_i64(o->out, old);

    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);

static ExitStatus op_ct(DisasContext *s, DisasOps *o)
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);

    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

#ifndef CONFIG_USER_ONLY
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);

static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);

static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);

static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);

static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);

static ExitStatus op_deb(DisasContext *s, DisasOps *o)
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);

static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);

static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);

static ExitStatus op_ear(DisasContext *s, DisasOps *o)
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));

static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);

static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));

static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);

        store_reg32_i64(r2, psw_mask);

    tcg_temp_free_i64(t);

static ExitStatus op_ex(DisasContext *s, DisasOps *o)
    int r1 = get_field(s->fields, r1);

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return EXIT_NORETURN;

        v1 = tcg_const_i64(0);

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

        tcg_temp_free_i64(v1);

    return EXIT_PC_CC_UPDATED;
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);

static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);

static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);

static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
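/* Illustrative note (not in the original source): for IN = 1ull << 40,
   the clz result is 23, so R1 = 23 and R1+1 = IN & ~(1ull << 40) = 0.
   For IN = 0 the shift amount is 64 and out2 is undefined after the
   shift, but cc_dst (which holds IN) is 0, so the andc still yields the
   architected result of 0.  */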
static ExitStatus op_icm(DisasContext *s, DisasOps *o)
    int m3 = get_field(s->fields, m3);
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));

        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));

        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));

        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;

        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;

            tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
            tcg_gen_addi_i64(o->in2, o->in2, 1);
            tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);

            m3 = (m3 << 1) & 0xf;

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    tcg_temp_free_i64(tmp);

static ExitStatus op_insi(DisasContext *s, DisasOps *o)
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);

static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);

#ifndef CONFIG_USER_ONLY
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
    check_privileged(s);
    m4 = tcg_const_i32(get_field(s->fields, m4));
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);

static ExitStatus op_iske(DisasContext *s, DisasOps *o)
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);

static ExitStatus op_keb(DisasContext *s, DisasOps *o)
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);

static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);

static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
static ExitStatus op_laa(DisasContext *s, DisasOps *o)
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

static ExitStatus op_lan(DisasContext *s, DisasOps *o)
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

static ExitStatus op_lao(DisasContext *s, DisasOps *o)
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

static ExitStatus op_lax(DisasContext *s, DisasOps *o)
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
    gen_helper_ldeb(o->out, cpu_env, o->in2);

static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
    gen_helper_ledb(o->out, cpu_env, o->in2);

static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);

static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);

static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);

static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);

static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);

static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));

static ExitStatus op_lat(DisasContext *s, DisasOps *o)
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap.  */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);

static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap.  */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);

static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap.  */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);

static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap.  */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);

static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap.  */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
    disas_jcc(s, &c, get_field(s->fields, m3));

        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,

        TCGv_i32 t32 = tcg_temp_new_i32();

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);

#ifndef CONFIG_USER_ONLY
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);

static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);

static ExitStatus op_lra(DisasContext *s, DisasOps *o)
    check_privileged(s);
    gen_helper_lra(o->out, cpu_env, o->in2);

static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
    check_privileged(s);

    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));

static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;

static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
2722 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2724 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2725 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2726 gen_helper_lam(cpu_env, r1, o->in2, r3);
2727 tcg_temp_free_i32(r1);
2728 tcg_temp_free_i32(r3);
2732 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2734 int r1 = get_field(s->fields, r1);
2735 int r3 = get_field(s->fields, r3);
2738 /* Only one register to read. */
2739 t1 = tcg_temp_new_i64();
2740 if (unlikely(r1 == r3)) {
2741 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2742 store_reg32_i64(r1, t1);
2747 /* First load the values of the first and last registers to trigger
2748 possible page faults. */
2749 t2 = tcg_temp_new_i64();
2750 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2751 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2752 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2753 store_reg32_i64(r1, t1);
2754 store_reg32_i64(r3, t2);
2756 /* Only two registers to read. */
2757 if (((r1 + 1) & 15) == r3) {
2763 /* Then load the remaining registers. Page fault can't occur. */
2765 tcg_gen_movi_i64(t2, 4);
2768 tcg_gen_add_i64(o->in2, o->in2, t2);
2769 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2770 store_reg32_i64(r1, t1);
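/* Worked example (added): the offset 4 * ((r3 - r1) & 15) used above
   handles register wraparound.  For LM %r14,%r1,0(%r2) the registers
   loaded are r14, r15, r0, r1, and the last word lies
   4 * ((1 - 14) & 15) = 12 bytes past the first.  Touching the first
   and last words before storing anything means any page fault arrives
   while the register file is still unmodified, keeping the instruction
   restartable. */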
2778 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2780 int r1 = get_field(s->fields, r1);
2781 int r3 = get_field(s->fields, r3);
2784 /* Only one register to read. */
2785 t1 = tcg_temp_new_i64();
2786 if (unlikely(r1 == r3)) {
2787 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2788 store_reg32h_i64(r1, t1);
2793 /* First load the values of the first and last registers to trigger
2794 possible page faults. */
2795 t2 = tcg_temp_new_i64();
2796 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2797 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2798 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2799 store_reg32h_i64(r1, t1);
2800 store_reg32h_i64(r3, t2);
2802 /* Only two registers to read. */
2803 if (((r1 + 1) & 15) == r3) {
2809 /* Then load the remaining registers. Page fault can't occur. */
2811 tcg_gen_movi_i64(t2, 4);
2814 tcg_gen_add_i64(o->in2, o->in2, t2);
2815 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2816 store_reg32h_i64(r1, t1);
2824 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2826 int r1 = get_field(s->fields, r1);
2827 int r3 = get_field(s->fields, r3);
2830 /* Only one register to read. */
2831 if (unlikely(r1 == r3)) {
2832 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2836 /* First load the values of the first and last registers to trigger
2837 possible page faults. */
2838 t1 = tcg_temp_new_i64();
2839 t2 = tcg_temp_new_i64();
2840 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2841 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2842 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2843 tcg_gen_mov_i64(regs[r1], t1);
2846 /* Only two registers to read. */
2847 if (((r1 + 1) & 15) == r3) {
2852 /* Then load the remaining registers. Page fault can't occur. */
2854 tcg_gen_movi_i64(t1, 8);
2857 tcg_gen_add_i64(o->in2, o->in2, t1);
2858 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2865 static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
2868 TCGMemOp mop = s->insn->data;
2870 /* In a parallel context, stop the world and single step. */
2871 if (parallel_cpus) {
2872 potential_page_fault(s);
2873 gen_exception(EXCP_ATOMIC);
2874 return EXIT_NORETURN;
2877 /* In a serial context, perform the two loads ... */
2878 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
2879 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
2880 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
2881 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
2882 tcg_temp_free_i64(a1);
2883 tcg_temp_free_i64(a2);
2885 /* ... and indicate that we performed them while interlocked. */
2886 gen_op_movi_cc(s, 0);
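/* Added note: raising EXCP_ATOMIC causes the main loop to re-execute
   this instruction with all other vCPUs stopped, so the serial path
   above runs and the two loads behave as if interlocked, without
   needing a true 128-bit atomic operation. */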
2890 #ifndef CONFIG_USER_ONLY
2891 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2893 check_privileged(s);
2894 potential_page_fault(s);
2895 gen_helper_lura(o->out, cpu_env, o->in2);
2899 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2901 check_privileged(s);
2902 potential_page_fault(s);
2903 gen_helper_lurag(o->out, cpu_env, o->in2);
2908 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2911 o->g_out = o->g_in2;
2912 TCGV_UNUSED_I64(o->in2);
2917 static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
2919 int b2 = get_field(s->fields, b2);
2920 TCGv ar1 = tcg_temp_new_i64();
2923 o->g_out = o->g_in2;
2924 TCGV_UNUSED_I64(o->in2);
2927 switch (s->tb->flags & FLAG_MASK_ASC) {
2928 case PSW_ASC_PRIMARY >> 32:
2929 tcg_gen_movi_i64(ar1, 0);
2931 case PSW_ASC_ACCREG >> 32:
2932 tcg_gen_movi_i64(ar1, 1);
2934 case PSW_ASC_SECONDARY >> 32:
2936 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
2938 tcg_gen_movi_i64(ar1, 0);
2941 case PSW_ASC_HOME >> 32:
2942 tcg_gen_movi_i64(ar1, 2);
2946 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
2947 tcg_temp_free_i64(ar1);
2952 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2956 o->g_out = o->g_in1;
2957 o->g_out2 = o->g_in2;
2958 TCGV_UNUSED_I64(o->in1);
2959 TCGV_UNUSED_I64(o->in2);
2960 o->g_in1 = o->g_in2 = false;
2964 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2966 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2967 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2968 tcg_temp_free_i32(l);
2972 static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
2974 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2975 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
2976 tcg_temp_free_i32(l);
2980 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2982 int r1 = get_field(s->fields, r1);
2983 int r2 = get_field(s->fields, r2);
2986 /* r1 and r2 must be even. */
2987 if (r1 & 1 || r2 & 1) {
2988 gen_program_exception(s, PGM_SPECIFICATION);
2989 return EXIT_NORETURN;
2992 t1 = tcg_const_i32(r1);
2993 t2 = tcg_const_i32(r2);
2994 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
2995 tcg_temp_free_i32(t1);
2996 tcg_temp_free_i32(t2);
3001 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
3003 int r1 = get_field(s->fields, r1);
3004 int r3 = get_field(s->fields, r3);
3007 /* r1 and r3 must be even. */
3008 if (r1 & 1 || r3 & 1) {
3009 gen_program_exception(s, PGM_SPECIFICATION);
3010 return EXIT_NORETURN;
3013 t1 = tcg_const_i32(r1);
3014 t3 = tcg_const_i32(r3);
3015 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3016 tcg_temp_free_i32(t1);
3017 tcg_temp_free_i32(t3);
3022 #ifndef CONFIG_USER_ONLY
3023 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
3025 int r1 = get_field(s->fields, l1);
3026 check_privileged(s);
3027 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3032 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
3034 int r1 = get_field(s->fields, l1);
3035 check_privileged(s);
3036 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3042 static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
3044 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3045 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3046 tcg_temp_free_i32(l);
3050 static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
3052 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3053 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3054 tcg_temp_free_i32(l);
3058 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
3060 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3065 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
3067 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3069 return_low128(o->in2);
3073 static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
3075 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3076 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3077 tcg_temp_free_i32(l);
3081 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
3083 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3087 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
3089 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3093 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
3095 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3099 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
3101 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3105 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
3107 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3111 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
3113 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3114 return_low128(o->out2);
3118 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
3120 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3121 return_low128(o->out2);
3125 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
3127 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3128 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3129 tcg_temp_free_i64(r3);
3133 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
3135 int r3 = get_field(s->fields, r3);
3136 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3140 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
3142 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3143 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3144 tcg_temp_free_i64(r3);
3148 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
3150 int r3 = get_field(s->fields, r3);
3151 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3155 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3158 z = tcg_const_i64(0);
3159 n = tcg_temp_new_i64();
3160 tcg_gen_neg_i64(n, o->in2);
3161 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3162 tcg_temp_free_i64(n);
3163 tcg_temp_free_i64(z);
3167 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
3169 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3173 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
3175 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3179 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
3181 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3182 tcg_gen_mov_i64(o->out2, o->in2);
3186 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
3188 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3189 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3190 tcg_temp_free_i32(l);
3195 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3197 tcg_gen_neg_i64(o->out, o->in2);
3201 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3203 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3207 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3209 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3213 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3215 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3216 tcg_gen_mov_i64(o->out2, o->in2);
3220 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3222 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3223 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3224 tcg_temp_free_i32(l);
3229 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3231 tcg_gen_or_i64(o->out, o->in1, o->in2);
3235 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3237 int shift = s->insn->data & 0xff;
3238 int size = s->insn->data >> 8;
3239 uint64_t mask = ((1ull << size) - 1) << shift;
3242 tcg_gen_shli_i64(o->in2, o->in2, shift);
3243 tcg_gen_or_i64(o->out, o->in1, o->in2);
3245 /* Produce the CC from only the bits manipulated. */
3246 tcg_gen_andi_i64(cc_dst, o->out, mask);
3247 set_cc_nz_u64(s, cc_dst);
3251 static ExitStatus op_pack(DisasContext *s, DisasOps *o)
3253 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3254 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3255 tcg_temp_free_i32(l);
3259 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3261 gen_helper_popcnt(o->out, o->in2);
3265 #ifndef CONFIG_USER_ONLY
3266 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3268 check_privileged(s);
3269 gen_helper_ptlb(cpu_env);
3274 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3276 int i3 = get_field(s->fields, i3);
3277 int i4 = get_field(s->fields, i4);
3278 int i5 = get_field(s->fields, i5);
3279 int do_zero = i4 & 0x80;
3280 uint64_t mask, imask, pmask;
3283 /* Adjust the arguments for the specific insn. */
3284 switch (s->fields->op2) {
3285 case 0x55: /* risbg */
3290 case 0x5d: /* risbhg */
3293 pmask = 0xffffffff00000000ull;
3295 case 0x51: /* risblg */
3298 pmask = 0x00000000ffffffffull;
3304 /* MASK is the set of bits to be inserted from R2.
3305 Take care with I3/I4 wraparound. */
3308 mask ^= pmask >> i4 >> 1;
3310 mask |= ~(pmask >> i4 >> 1);
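/* Worked example (added; assumes mask is first seeded with pmask >> i3
   in the elided line above): for risbg, pmask == ~0ull; with i3 = 40
   and i4 = 7 the range wraps, so the second arm applies and
       mask = (~0ull >> 40) | ~(~0ull >> 7 >> 1),
   i.e. big-endian bit positions 40-63 plus 0-7 are selected. */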
3314 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3315 insns, we need to keep the other half of the register. */
3316 imask = ~mask | ~pmask;
3318 if (s->fields->op2 == 0x55) {
3328 if (s->fields->op2 == 0x5d) {
3332 /* In some cases we can implement this with extract. */
3333 if (imask == 0 && pos == 0 && len > 0 && rot + len <= 64) {
3334 tcg_gen_extract_i64(o->out, o->in2, rot, len);
3338 /* In some cases we can implement this with deposit. */
3339 if (len > 0 && (imask == 0 || ~mask == imask)) {
3340 /* Note that we rotate the bits to be inserted to the lsb, not to
3341 the position as described in the PoO. */
3342 rot = (rot - pos) & 63;
3347 /* Rotate the input as necessary. */
3348 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3350 /* Insert the selected bits into the output. */
3353 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3355 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3357 } else if (imask == 0) {
3358 tcg_gen_andi_i64(o->out, o->in2, mask);
3360 tcg_gen_andi_i64(o->in2, o->in2, mask);
3361 tcg_gen_andi_i64(o->out, o->out, imask);
3362 tcg_gen_or_i64(o->out, o->out, o->in2);
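/* Illustrative case (added): risbg with i3 = 32, i4 = 63, i5 = 0 gives
   mask = 0x00000000ffffffff and imask = 0xffffffff00000000, so the insn
   hits the deposit fast path (or, with do_zero set, the extract path)
   and simply copies the low word of r2 into the low word of r1,
   clearing the high half when do_zero is set. */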
3367 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3369 int i3 = get_field(s->fields, i3);
3370 int i4 = get_field(s->fields, i4);
3371 int i5 = get_field(s->fields, i5);
3374 /* If this is a test-only form, arrange to discard the result. */
3376 o->out = tcg_temp_new_i64();
3384 /* MASK is the set of bits to be operated on from R2.
3385 Take care with I3/I4 wraparound. */
3388 mask ^= ~0ull >> i4 >> 1;
3390 mask |= ~(~0ull >> i4 >> 1);
3393 /* Rotate the input as necessary. */
3394 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3397 switch (s->fields->op2) {
3398 case 0x55: /* AND */
3399 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3400 tcg_gen_and_i64(o->out, o->out, o->in2);
3403 tcg_gen_andi_i64(o->in2, o->in2, mask);
3404 tcg_gen_or_i64(o->out, o->out, o->in2);
3406 case 0x57: /* XOR */
3407 tcg_gen_andi_i64(o->in2, o->in2, mask);
3408 tcg_gen_xor_i64(o->out, o->out, o->in2);
3415 tcg_gen_andi_i64(cc_dst, o->out, mask);
3416 set_cc_nz_u64(s, cc_dst);
3420 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3422 tcg_gen_bswap16_i64(o->out, o->in2);
3426 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3428 tcg_gen_bswap32_i64(o->out, o->in2);
3432 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3434 tcg_gen_bswap64_i64(o->out, o->in2);
3438 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3440 TCGv_i32 t1 = tcg_temp_new_i32();
3441 TCGv_i32 t2 = tcg_temp_new_i32();
3442 TCGv_i32 to = tcg_temp_new_i32();
3443 tcg_gen_extrl_i64_i32(t1, o->in1);
3444 tcg_gen_extrl_i64_i32(t2, o->in2);
3445 tcg_gen_rotl_i32(to, t1, t2);
3446 tcg_gen_extu_i32_i64(o->out, to);
3447 tcg_temp_free_i32(t1);
3448 tcg_temp_free_i32(t2);
3449 tcg_temp_free_i32(to);
3453 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3455 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3459 #ifndef CONFIG_USER_ONLY
3460 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3462 check_privileged(s);
3463 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3468 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3470 check_privileged(s);
3471 gen_helper_sacf(cpu_env, o->in2);
3472 /* Addressing mode has changed, so end the block. */
3473 return EXIT_PC_STALE;
3477 static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3479 int sam = s->insn->data;
3495 /* Bizarre but true, we check the address of the current insn for the
3496 specification exception, not the next to be executed. Thus the PoO
3497 documents that Bad Things Happen two bytes before the end. */
3498 if (s->pc & ~mask) {
3499 gen_program_exception(s, PGM_SPECIFICATION);
3500 return EXIT_NORETURN;
3504 tsam = tcg_const_i64(sam);
3505 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3506 tcg_temp_free_i64(tsam);
3508 /* Always exit the TB, since we (may have) changed execution mode. */
3509 return EXIT_PC_STALE;
3512 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3514 int r1 = get_field(s->fields, r1);
3515 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3519 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3521 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3525 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3527 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3531 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3533 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3534 return_low128(o->out2);
3538 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3540 gen_helper_sqeb(o->out, cpu_env, o->in2);
3544 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3546 gen_helper_sqdb(o->out, cpu_env, o->in2);
3550 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3552 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3553 return_low128(o->out2);
3557 #ifndef CONFIG_USER_ONLY
3558 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3560 check_privileged(s);
3561 potential_page_fault(s);
3562 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3567 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3569 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3570 check_privileged(s);
3571 potential_page_fault(s);
3572 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
3574 tcg_temp_free_i32(r1);
3579 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3586 disas_jcc(s, &c, get_field(s->fields, m3));
3588 /* We want to store when the condition is fulfilled, so branch
3589 out when it's not. */
3590 c.cond = tcg_invert_cond(c.cond);
3592 lab = gen_new_label();
3594 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3596 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3600 r1 = get_field(s->fields, r1);
3601 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3602 if (s->insn->data) {
3603 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3605 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3607 tcg_temp_free_i64(a);
3613 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3615 uint64_t sign = 1ull << s->insn->data;
3616 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3617 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3618 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3619 /* The arithmetic left shift is curious in that it does not affect
3620 the sign bit. Copy that over from the source unchanged. */
3621 tcg_gen_andi_i64(o->out, o->out, ~sign);
3622 tcg_gen_andi_i64(o->in1, o->in1, sign);
3623 tcg_gen_or_i64(o->out, o->out, o->in1);
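/* Worked example (added) for the 32-bit form, where sign is bit 31:
   0x80000001 shifted left by one gives 0x00000002; masking the shifted
   value with ~sign and OR-ing back the saved sign bit yields
   0x80000002, so the sign never moves, as the architecture requires. */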
3627 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3629 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3633 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3635 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3639 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3641 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3645 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3647 gen_helper_sfpc(cpu_env, o->in2);
3651 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3653 gen_helper_sfas(cpu_env, o->in2);
3657 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3659 int b2 = get_field(s->fields, b2);
3660 int d2 = get_field(s->fields, d2);
3661 TCGv_i64 t1 = tcg_temp_new_i64();
3662 TCGv_i64 t2 = tcg_temp_new_i64();
3665 switch (s->fields->op2) {
3666 case 0x99: /* SRNM */
3669 case 0xb8: /* SRNMB */
3672 case 0xb9: /* SRNMT */
3678 mask = (1 << len) - 1;
3680 /* Insert the value into the appropriate field of the FPC. */
3682 tcg_gen_movi_i64(t1, d2 & mask);
3684 tcg_gen_addi_i64(t1, regs[b2], d2);
3685 tcg_gen_andi_i64(t1, t1, mask);
3687 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3688 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3689 tcg_temp_free_i64(t1);
3691 /* Then install the new FPC to set the rounding mode in fpu_status. */
3692 gen_helper_sfpc(cpu_env, t2);
3693 tcg_temp_free_i64(t2);
3697 #ifndef CONFIG_USER_ONLY
3698 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3700 check_privileged(s);
3701 tcg_gen_shri_i64(o->in2, o->in2, 4);
3702 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
3706 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3708 check_privileged(s);
3709 gen_helper_sske(cpu_env, o->in1, o->in2);
3713 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3715 check_privileged(s);
3716 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3720 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3722 check_privileged(s);
3723 /* ??? Surely cpu address != cpu number. In any case the previous
3724 version of this stored more than the required half-word, so it
3725 is unlikely this has ever been tested. */
3726 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3730 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3732 gen_helper_stck(o->out, cpu_env);
3733 /* ??? We don't implement clock states. */
3734 gen_op_movi_cc(s, 0);
3738 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3740 TCGv_i64 c1 = tcg_temp_new_i64();
3741 TCGv_i64 c2 = tcg_temp_new_i64();
3742 gen_helper_stck(c1, cpu_env);
3743 /* Shift the 64-bit value into its place as a zero-extended
3744 104-bit value. Note that "bit positions 64-103 are always
3745 non-zero so that they compare differently to STCK"; we set
3746 the least significant bit to 1. */
3747 tcg_gen_shli_i64(c2, c1, 56);
3748 tcg_gen_shri_i64(c1, c1, 8);
3749 tcg_gen_ori_i64(c2, c2, 0x10000);
3750 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3751 tcg_gen_addi_i64(o->in2, o->in2, 8);
3752 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3753 tcg_temp_free_i64(c1);
3754 tcg_temp_free_i64(c2);
3755 /* ??? We don't implement clock states. */
3756 gen_op_movi_cc(s, 0);
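/* Added note on the resulting 16-byte layout: byte 0 ends up zero (the
   epoch index), bytes 1-8 carry the 64 architected TOD bits, the
   0x10000 above plants the forced non-zero bit in the low-order
   extension, and the trailing programmable-field bytes remain zero. */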
3760 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3762 check_privileged(s);
3763 gen_helper_sckc(cpu_env, o->in2);
3767 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3769 check_privileged(s);
3770 gen_helper_stckc(o->out, cpu_env);
3774 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3776 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3777 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3778 check_privileged(s);
3779 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3780 tcg_temp_free_i32(r1);
3781 tcg_temp_free_i32(r3);
3785 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3787 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3788 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3789 check_privileged(s);
3790 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3791 tcg_temp_free_i32(r1);
3792 tcg_temp_free_i32(r3);
3796 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3798 TCGv_i64 t1 = tcg_temp_new_i64();
3800 check_privileged(s);
3801 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3802 tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
3803 tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
3804 tcg_temp_free_i64(t1);
3809 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3811 check_privileged(s);
3812 gen_helper_spt(cpu_env, o->in2);
3816 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3818 check_privileged(s);
3819 gen_helper_stfl(cpu_env);
3823 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3825 check_privileged(s);
3826 gen_helper_stpt(o->out, cpu_env);
3830 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3832 check_privileged(s);
3833 potential_page_fault(s);
3834 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3839 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3841 check_privileged(s);
3842 gen_helper_spx(cpu_env, o->in2);
3846 static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
3848 check_privileged(s);
3849 potential_page_fault(s);
3850 gen_helper_xsch(cpu_env, regs[1]);
3855 static ExitStatus op_csch(DisasContext *s, DisasOps *o)
3857 check_privileged(s);
3858 potential_page_fault(s);
3859 gen_helper_csch(cpu_env, regs[1]);
3864 static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
3866 check_privileged(s);
3867 potential_page_fault(s);
3868 gen_helper_hsch(cpu_env, regs[1]);
3873 static ExitStatus op_msch(DisasContext *s, DisasOps *o)
3875 check_privileged(s);
3876 potential_page_fault(s);
3877 gen_helper_msch(cpu_env, regs[1], o->in2);
3882 static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
3884 check_privileged(s);
3885 potential_page_fault(s);
3886 gen_helper_rchp(cpu_env, regs[1]);
3891 static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
3893 check_privileged(s);
3894 potential_page_fault(s);
3895 gen_helper_rsch(cpu_env, regs[1]);
3900 static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
3902 check_privileged(s);
3903 potential_page_fault(s);
3904 gen_helper_ssch(cpu_env, regs[1], o->in2);
3909 static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
3911 check_privileged(s);
3912 potential_page_fault(s);
3913 gen_helper_stsch(cpu_env, regs[1], o->in2);
3918 static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
3920 check_privileged(s);
3921 potential_page_fault(s);
3922 gen_helper_tsch(cpu_env, regs[1], o->in2);
3927 static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
3929 check_privileged(s);
3930 potential_page_fault(s);
3931 gen_helper_chsc(cpu_env, o->in2);
3936 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3938 check_privileged(s);
3939 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3940 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
3944 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3946 uint64_t i2 = get_field(s->fields, i2);
3949 check_privileged(s);
3951 /* It is important to do what the instruction name says: STORE THEN.
3952 If we let the output hook perform the store, then a fault and
3953 restart would leave the wrong SYSTEM MASK in place. */
3954 t = tcg_temp_new_i64();
3955 tcg_gen_shri_i64(t, psw_mask, 56);
3956 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3957 tcg_temp_free_i64(t);
3959 if (s->fields->op == 0xac) {
3960 tcg_gen_andi_i64(psw_mask, psw_mask,
3961 (i2 << 56) | 0x00ffffffffffffffull);
3963 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
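/* Usage sketch (added): op 0xac is STNSM, which ANDs the immediate into
   the system mask after the store; STOSM ORs it in instead.  A typical
   guest sequence stores the old mask byte with STNSM, runs with some
   interruptions masked off, and later restores the saved byte via SSM. */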
3968 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3970 check_privileged(s);
3971 potential_page_fault(s);
3972 gen_helper_stura(cpu_env, o->in2, o->in1);
3976 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
3978 check_privileged(s);
3979 potential_page_fault(s);
3980 gen_helper_sturg(cpu_env, o->in2, o->in1);
3985 static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
3987 potential_page_fault(s);
3988 gen_helper_stfle(cc_op, cpu_env, o->in2);
3993 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3995 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3999 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
4001 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4005 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
4007 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4011 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
4013 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4017 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
4019 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4020 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4021 gen_helper_stam(cpu_env, r1, o->in2, r3);
4022 tcg_temp_free_i32(r1);
4023 tcg_temp_free_i32(r3);
4027 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
4029 int m3 = get_field(s->fields, m3);
4030 int pos, base = s->insn->data;
4031 TCGv_i64 tmp = tcg_temp_new_i64();
4033 pos = base + ctz32(m3) * 8;
4036 /* Effectively a 32-bit store. */
4037 tcg_gen_shri_i64(tmp, o->in1, pos);
4038 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4044 /* Effectively a 16-bit store. */
4045 tcg_gen_shri_i64(tmp, o->in1, pos);
4046 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4053 /* Effectively an 8-bit store. */
4054 tcg_gen_shri_i64(tmp, o->in1, pos);
4055 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4059 /* This is going to be a sequence of shifts and stores. */
4060 pos = base + 32 - 8;
4063 tcg_gen_shri_i64(tmp, o->in1, pos);
4064 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4065 tcg_gen_addi_i64(o->in2, o->in2, 1);
4067 m3 = (m3 << 1) & 0xf;
4072 tcg_temp_free_i64(tmp);
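/* Example (added): STCM with m3 = 0x6 selects two adjacent bytes, so it
   folds into the single 16-bit store above at
   pos = base + ctz32(6) * 8 = base + 8, whereas a sparse mask such as
   0xa falls through to the shift-and-store-byte loop. */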
4076 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
4078 int r1 = get_field(s->fields, r1);
4079 int r3 = get_field(s->fields, r3);
4080 int size = s->insn->data;
4081 TCGv_i64 tsize = tcg_const_i64(size);
4085 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4087 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4092 tcg_gen_add_i64(o->in2, o->in2, tsize);
4096 tcg_temp_free_i64(tsize);
4100 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
4102 int r1 = get_field(s->fields, r1);
4103 int r3 = get_field(s->fields, r3);
4104 TCGv_i64 t = tcg_temp_new_i64();
4105 TCGv_i64 t4 = tcg_const_i64(4);
4106 TCGv_i64 t32 = tcg_const_i64(32);
4109 tcg_gen_shl_i64(t, regs[r1], t32);
4110 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4114 tcg_gen_add_i64(o->in2, o->in2, t4);
4118 tcg_temp_free_i64(t);
4119 tcg_temp_free_i64(t4);
4120 tcg_temp_free_i64(t32);
4124 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
4126 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
4128 return_low128(o->in2);
4132 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
4134 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4138 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
4143 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4145 /* The !borrow flag is the msb of CC. Since we want the inverse of
4146 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4147 disas_jcc(s, &cmp, 8 | 4);
4148 borrow = tcg_temp_new_i64();
4150 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4152 TCGv_i32 t = tcg_temp_new_i32();
4153 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4154 tcg_gen_extu_i32_i64(borrow, t);
4155 tcg_temp_free_i32(t);
4159 tcg_gen_sub_i64(o->out, o->out, borrow);
4160 tcg_temp_free_i64(borrow);
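/* Added note: CC values 0 and 1 are exactly the "no carry" outcomes of
   the preceding subtraction, i.e. a borrow occurred, so the setcond
   above materializes borrow as 0 or 1 and the final subtraction
   computes out = in1 - in2 - borrow. */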
4164 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
4171 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4172 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4173 tcg_temp_free_i32(t);
4175 t = tcg_const_i32(s->ilen);
4176 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4177 tcg_temp_free_i32(t);
4179 gen_exception(EXCP_SVC);
4180 return EXIT_NORETURN;
4183 static ExitStatus op_tam(DisasContext *s, DisasOps *o)
4187 cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
4188 cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
4189 gen_op_movi_cc(s, cc);
4193 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
4195 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4200 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
4202 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4207 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
4209 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4214 #ifndef CONFIG_USER_ONLY
4216 static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
4218 check_privileged(s);
4219 gen_helper_testblock(cc_op, cpu_env, o->in2);
4224 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
4226 gen_helper_tprot(cc_op, o->addr1, o->in2);
4233 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
4235 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4236 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4237 tcg_temp_free_i32(l);
4242 static ExitStatus op_tre(DisasContext *s, DisasOps *o)
4244 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4245 return_low128(o->out2);
4250 static ExitStatus op_trt(DisasContext *s, DisasOps *o)
4252 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4253 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4254 tcg_temp_free_i32(l);
4259 static ExitStatus op_ts(DisasContext *s, DisasOps *o)
4261 TCGv_i32 t1 = tcg_const_i32(0xff);
4262 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4263 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4264 tcg_temp_free_i32(t1);
4269 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
4271 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4272 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4273 tcg_temp_free_i32(l);
4277 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
4279 int d1 = get_field(s->fields, d1);
4280 int d2 = get_field(s->fields, d2);
4281 int b1 = get_field(s->fields, b1);
4282 int b2 = get_field(s->fields, b2);
4283 int l = get_field(s->fields, l1);
4286 o->addr1 = get_address(s, 0, b1, d1);
4288 /* If the addresses are identical, this is a store/memset of zero. */
4289 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4290 o->in2 = tcg_const_i64(0);
4294 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4297 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4301 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4304 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4308 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4311 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4315 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4317 gen_op_movi_cc(s, 0);
4321 /* But in general we'll defer to a helper. */
4322 o->in2 = get_address(s, 0, b2, d2);
4323 t32 = tcg_const_i32(l);
4324 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4325 tcg_temp_free_i32(t32);
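/* Usage note (added): "XC D(L,B),D(L,B)" with both operands identical
   is the idiomatic s390x memory clear, which is why the special case
   above turns it into straight-line stores for lengths up to 32 bytes
   rather than a helper call. */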
4330 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4332 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4336 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4338 int shift = s->insn->data & 0xff;
4339 int size = s->insn->data >> 8;
4340 uint64_t mask = ((1ull << size) - 1) << shift;
4343 tcg_gen_shli_i64(o->in2, o->in2, shift);
4344 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4346 /* Produce the CC from only the bits manipulated. */
4347 tcg_gen_andi_i64(cc_dst, o->out, mask);
4348 set_cc_nz_u64(s, cc_dst);
4352 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4354 o->out = tcg_const_i64(0);
4358 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4360 o->out = tcg_const_i64(0);
4366 /* ====================================================================== */
4367 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4368 the original inputs), update the various cc data structures in order to
4369 be able to compute the new condition code. */
4371 static void cout_abs32(DisasContext *s, DisasOps *o)
4373 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4376 static void cout_abs64(DisasContext *s, DisasOps *o)
4378 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4381 static void cout_adds32(DisasContext *s, DisasOps *o)
4383 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4386 static void cout_adds64(DisasContext *s, DisasOps *o)
4388 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4391 static void cout_addu32(DisasContext *s, DisasOps *o)
4393 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4396 static void cout_addu64(DisasContext *s, DisasOps *o)
4398 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4401 static void cout_addc32(DisasContext *s, DisasOps *o)
4403 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4406 static void cout_addc64(DisasContext *s, DisasOps *o)
4408 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4411 static void cout_cmps32(DisasContext *s, DisasOps *o)
4413 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4416 static void cout_cmps64(DisasContext *s, DisasOps *o)
4418 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4421 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4423 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4426 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4428 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4431 static void cout_f32(DisasContext *s, DisasOps *o)
4433 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4436 static void cout_f64(DisasContext *s, DisasOps *o)
4438 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4441 static void cout_f128(DisasContext *s, DisasOps *o)
4443 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4446 static void cout_nabs32(DisasContext *s, DisasOps *o)
4448 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4451 static void cout_nabs64(DisasContext *s, DisasOps *o)
4453 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4456 static void cout_neg32(DisasContext *s, DisasOps *o)
4458 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4461 static void cout_neg64(DisasContext *s, DisasOps *o)
4463 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4466 static void cout_nz32(DisasContext *s, DisasOps *o)
4468 tcg_gen_ext32u_i64(cc_dst, o->out);
4469 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4472 static void cout_nz64(DisasContext *s, DisasOps *o)
4474 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4477 static void cout_s32(DisasContext *s, DisasOps *o)
4479 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
4482 static void cout_s64(DisasContext *s, DisasOps *o)
4484 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
4487 static void cout_subs32(DisasContext *s, DisasOps *o)
4489 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
4492 static void cout_subs64(DisasContext *s, DisasOps *o)
4494 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
4497 static void cout_subu32(DisasContext *s, DisasOps *o)
4499 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
4502 static void cout_subu64(DisasContext *s, DisasOps *o)
4504 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
4507 static void cout_subb32(DisasContext *s, DisasOps *o)
4509 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
4512 static void cout_subb64(DisasContext *s, DisasOps *o)
4514 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4517 static void cout_tm32(DisasContext *s, DisasOps *o)
4519 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4522 static void cout_tm64(DisasContext *s, DisasOps *o)
4524 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4527 /* ====================================================================== */
4528 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4529 with the TCG register to which we will write. Used in combination with
4530 the "wout" generators, in some cases we need a new temporary, and in
4531 some cases we can write to a TCG global. */
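/* Added note: a "prep" runs before the operation and the matching
   "wout" after it; e.g. prep_new paired with wout_r1_32 hands the op a
   scratch i64 and commits only its low 32 bits to the register file
   once the op has finished. */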
4533 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4535 o->out = tcg_temp_new_i64();
4537 #define SPEC_prep_new 0
4539 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4541 o->out = tcg_temp_new_i64();
4542 o->out2 = tcg_temp_new_i64();
4544 #define SPEC_prep_new_P 0
4546 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4548 o->out = regs[get_field(f, r1)];
4551 #define SPEC_prep_r1 0
4553 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
4555 int r1 = get_field(f, r1);
4557 o->out2 = regs[r1 + 1];
4558 o->g_out = o->g_out2 = true;
4560 #define SPEC_prep_r1_P SPEC_r1_even
4562 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4564 o->out = fregs[get_field(f, r1)];
4567 #define SPEC_prep_f1 0
4569 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4571 int r1 = get_field(f, r1);
4573 o->out2 = fregs[r1 + 2];
4574 o->g_out = o->g_out2 = true;
4576 #define SPEC_prep_x1 SPEC_r1_f128
4578 /* ====================================================================== */
4579 /* The "Write OUTput" generators. These generally perform some non-trivial
4580 copy of data to TCG globals, or to main memory. The trivial cases are
4581 generally handled by having a "prep" generator install the TCG global
4582 as the destination of the operation. */
4584 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4586 store_reg(get_field(f, r1), o->out);
4588 #define SPEC_wout_r1 0
4590 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4592 int r1 = get_field(f, r1);
4593 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
4595 #define SPEC_wout_r1_8 0
4597 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4599 int r1 = get_field(f, r1);
4600 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
4602 #define SPEC_wout_r1_16 0
4604 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4606 store_reg32_i64(get_field(f, r1), o->out);
4608 #define SPEC_wout_r1_32 0
4610 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
4612 store_reg32h_i64(get_field(f, r1), o->out);
4614 #define SPEC_wout_r1_32h 0
4616 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4618 int r1 = get_field(f, r1);
4619 store_reg32_i64(r1, o->out);
4620 store_reg32_i64(r1 + 1, o->out2);
4622 #define SPEC_wout_r1_P32 SPEC_r1_even
4624 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4626 int r1 = get_field(f, r1);
4627 store_reg32_i64(r1 + 1, o->out);
4628 tcg_gen_shri_i64(o->out, o->out, 32);
4629 store_reg32_i64(r1, o->out);
4631 #define SPEC_wout_r1_D32 SPEC_r1_even
4633 static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4635 int r3 = get_field(f, r3);
4636 store_reg32_i64(r3, o->out);
4637 store_reg32_i64(r3 + 1, o->out2);
4639 #define SPEC_wout_r3_P32 SPEC_r3_even
4641 static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
4643 int r3 = get_field(f, r3);
4644 store_reg(r3, o->out);
4645 store_reg(r3 + 1, o->out2);
4647 #define SPEC_wout_r3_P64 SPEC_r3_even
4649 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4651 store_freg32_i64(get_field(f, r1), o->out);
4653 #define SPEC_wout_e1 0
4655 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4657 store_freg(get_field(f, r1), o->out);
4659 #define SPEC_wout_f1 0
4661 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4663 int f1 = get_field(f, r1);
4664 store_freg(f1, o->out);
4665 store_freg(f1 + 2, o->out2);
4667 #define SPEC_wout_x1 SPEC_r1_f128
4669 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4671 if (get_field(f, r1) != get_field(f, r2)) {
4672 store_reg32_i64(get_field(f, r1), o->out);
4675 #define SPEC_wout_cond_r1r2_32 0
4677 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4679 if (get_field(f, r1) != get_field(f, r2)) {
4680 store_freg32_i64(get_field(f, r1), o->out);
4683 #define SPEC_wout_cond_e1e2 0
4685 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4687 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4689 #define SPEC_wout_m1_8 0
4691 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4693 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4695 #define SPEC_wout_m1_16 0
4697 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4699 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4701 #define SPEC_wout_m1_32 0
4703 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4705 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4707 #define SPEC_wout_m1_64 0
4709 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4711 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4713 #define SPEC_wout_m2_32 0
4715 static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4717 store_reg(get_field(f, r1), o->in2);
4719 #define SPEC_wout_in2_r1 0
4721 static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4723 store_reg32_i64(get_field(f, r1), o->in2);
4725 #define SPEC_wout_in2_r1_32 0
4727 /* ====================================================================== */
4728 /* The "INput 1" generators. These load the first operand to an insn. */
4730 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4732 o->in1 = load_reg(get_field(f, r1));
4734 #define SPEC_in1_r1 0
4736 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4738 o->in1 = regs[get_field(f, r1)];
4741 #define SPEC_in1_r1_o 0
4743 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4745 o->in1 = tcg_temp_new_i64();
4746 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
4748 #define SPEC_in1_r1_32s 0
4750 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4752 o->in1 = tcg_temp_new_i64();
4753 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4755 #define SPEC_in1_r1_32u 0
4757 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4759 o->in1 = tcg_temp_new_i64();
4760 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4762 #define SPEC_in1_r1_sr32 0
4764 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4766 o->in1 = load_reg(get_field(f, r1) + 1);
4768 #define SPEC_in1_r1p1 SPEC_r1_even
4770 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4772 o->in1 = tcg_temp_new_i64();
4773 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4775 #define SPEC_in1_r1p1_32s SPEC_r1_even
4777 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4779 o->in1 = tcg_temp_new_i64();
4780 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4782 #define SPEC_in1_r1p1_32u SPEC_r1_even
4784 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4786 int r1 = get_field(f, r1);
4787 o->in1 = tcg_temp_new_i64();
4788 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4790 #define SPEC_in1_r1_D32 SPEC_r1_even
4792 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4794 o->in1 = load_reg(get_field(f, r2));
4796 #define SPEC_in1_r2 0
4798 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4800 o->in1 = tcg_temp_new_i64();
4801 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
4803 #define SPEC_in1_r2_sr32 0
4805 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4807 o->in1 = load_reg(get_field(f, r3));
4809 #define SPEC_in1_r3 0
4811 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4813 o->in1 = regs[get_field(f, r3)];
4816 #define SPEC_in1_r3_o 0
4818 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4820 o->in1 = tcg_temp_new_i64();
4821 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4823 #define SPEC_in1_r3_32s 0
4825 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4827 o->in1 = tcg_temp_new_i64();
4828 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4830 #define SPEC_in1_r3_32u 0
4832 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4834 int r3 = get_field(f, r3);
4835 o->in1 = tcg_temp_new_i64();
4836 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4838 #define SPEC_in1_r3_D32 SPEC_r3_even
4840 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4842 o->in1 = load_freg32_i64(get_field(f, r1));
4844 #define SPEC_in1_e1 0
4846 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4848 o->in1 = fregs[get_field(f, r1)];
4851 #define SPEC_in1_f1_o 0
4853 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4855 int r1 = get_field(f, r1);
4857 o->out2 = fregs[r1 + 2];
4858 o->g_out = o->g_out2 = true;
4860 #define SPEC_in1_x1_o SPEC_r1_f128
4862 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4864 o->in1 = fregs[get_field(f, r3)];
4867 #define SPEC_in1_f3_o 0
4869 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4871 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4873 #define SPEC_in1_la1 0
4875 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4877 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4878 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4880 #define SPEC_in1_la2 0
4882 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4885 o->in1 = tcg_temp_new_i64();
4886 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4888 #define SPEC_in1_m1_8u 0
4890 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4893 o->in1 = tcg_temp_new_i64();
4894 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4896 #define SPEC_in1_m1_16s 0
4898 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4901 o->in1 = tcg_temp_new_i64();
4902 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4904 #define SPEC_in1_m1_16u 0
4906 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4909 o->in1 = tcg_temp_new_i64();
4910 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4912 #define SPEC_in1_m1_32s 0
4914 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4917 o->in1 = tcg_temp_new_i64();
4918 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4920 #define SPEC_in1_m1_32u 0
4922 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4925 o->in1 = tcg_temp_new_i64();
4926 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4928 #define SPEC_in1_m1_64 0
4930 /* ====================================================================== */
4931 /* The "INput 2" generators. These load the second operand to an insn. */
4933 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4935 o->in2 = regs[get_field(f, r1)];
4938 #define SPEC_in2_r1_o 0
4940 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4942 o->in2 = tcg_temp_new_i64();
4943 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4945 #define SPEC_in2_r1_16u 0
4947 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4949 o->in2 = tcg_temp_new_i64();
4950 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4952 #define SPEC_in2_r1_32u 0
4954 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4956 int r1 = get_field(f, r1);
4957 o->in2 = tcg_temp_new_i64();
4958 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4960 #define SPEC_in2_r1_D32 SPEC_r1_even
4962 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4964 o->in2 = load_reg(get_field(f, r2));
4966 #define SPEC_in2_r2 0
4968 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4970 o->in2 = regs[get_field(f, r2)];
4973 #define SPEC_in2_r2_o 0
4975 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4977 int r2 = get_field(f, r2);
4979 o->in2 = load_reg(r2);
4982 #define SPEC_in2_r2_nz 0
4984 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4986 o->in2 = tcg_temp_new_i64();
4987 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4989 #define SPEC_in2_r2_8s 0
4991 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4993 o->in2 = tcg_temp_new_i64();
4994 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4996 #define SPEC_in2_r2_8u 0
4998 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5000 o->in2 = tcg_temp_new_i64();
5001 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
5003 #define SPEC_in2_r2_16s 0
5005 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5007 o->in2 = tcg_temp_new_i64();
5008 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
5010 #define SPEC_in2_r2_16u 0
5012 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5014 o->in2 = load_reg(get_field(f, r3));
5016 #define SPEC_in2_r3 0
5018 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5020 o->in2 = tcg_temp_new_i64();
5021 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
5023 #define SPEC_in2_r3_sr32 0
5025 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5027 o->in2 = tcg_temp_new_i64();
5028 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
5030 #define SPEC_in2_r2_32s 0
5032 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5034 o->in2 = tcg_temp_new_i64();
5035 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
5037 #define SPEC_in2_r2_32u 0
5039 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5041 o->in2 = tcg_temp_new_i64();
5042 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
5044 #define SPEC_in2_r2_sr32 0
5046 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
5048 o->in2 = load_freg32_i64(get_field(f, r2));
5050 #define SPEC_in2_e2 0
5052 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5054 o->in2 = fregs[get_field(f, r2)];
5057 #define SPEC_in2_f2_o 0
5059 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5061 int r2 = get_field(f, r2);
5063 o->in2 = fregs[r2 + 2];
5064 o->g_in1 = o->g_in2 = true;
5066 #define SPEC_in2_x2_o SPEC_r2_f128
5068 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
5070 o->in2 = get_address(s, 0, get_field(f, r2), 0);
5072 #define SPEC_in2_ra2 0
5074 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
5076 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5077 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5079 #define SPEC_in2_a2 0
5081 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
5083 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
5085 #define SPEC_in2_ri2 0
5087 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
5089 help_l2_shift(s, f, o, 31);
5091 #define SPEC_in2_sh32 0
5093 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
5095 help_l2_shift(s, f, o, 63);
5097 #define SPEC_in2_sh64 0
5099 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5102 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5104 #define SPEC_in2_m2_8u 0
5106 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5109 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5111 #define SPEC_in2_m2_16s 0
5113 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5116 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5118 #define SPEC_in2_m2_16u 0
5120 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5123 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5125 #define SPEC_in2_m2_32s 0
5127 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5130 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5132 #define SPEC_in2_m2_32u 0
5134 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5137 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5139 #define SPEC_in2_m2_64 0
5141 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5144 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5146 #define SPEC_in2_mri2_16u 0
5148 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5151 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5153 #define SPEC_in2_mri2_32s 0
5155 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5158 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5160 #define SPEC_in2_mri2_32u 0
5162 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5165 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5167 #define SPEC_in2_mri2_64 0
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
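
/* The shift amount for the *_shl helpers comes from the insn's table
   entry.  Illustrative example: an insn whose 16-bit immediate targets
   bits 16..31 of a register would carry data = 16 in insn-data.def, so
   i2 = 0x1234 becomes the constant 0x12340000 here.  */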
#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */
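
/* Illustration (quoted from memory -- see insn-data.def for the
   authoritative entries): a line such as
       C(0x5a00, A, RX_a, Z, r1, m2_32s, new, r1_32, add, adds32)
   expands once into "insn_A," for the enum below, once into a DisasInsn
   initializer for insn_info[], and once into
   "case 0x5a00: return &insn_info[insn_A];" inside lookup_opc.  */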
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                      \
    .opc = OPC,                                                            \
    .fmt = FMT_##FT,                                                       \
    .fac = FAC_##FC,                                                       \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
    .name = #NM,                                                           \
    .help_in1 = in1_##I1,                                                  \
    .help_in2 = in2_##I2,                                                  \
    .help_prep = prep_##P,                                                 \
    .help_wout = wout_##W,                                                 \
    .help_cout = cout_##CC,                                                \
    .help_op = op_##OP,                                                    \
    .data = D                                                              \
 },
/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
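        /* Worked example for the split case: a 20-bit displacement is
           encoded as DL (12 bits) plus DH (8 bits); the raw extract above
           places DL in bits 19:8 and DH in bits 7:0.  For DL = 0x123,
           DH = 0xff the raw value is 0x123ff, and the expression yields
           0xfffff123, i.e. the sign-extended displacement -0xedd.  */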
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->next_pc = s->pc + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
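    /* For instance, with the insn left-aligned in the 64-bit value, a
       RIL-format insn keeps its 4-bit secondary opcode in bits 12..15 of
       the encoding, so (insn << 12) >> 60 below isolates exactly those
       four bits; e.g. BRASL is primary byte 0xc0 with secondary nibble 5.  */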
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
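
/* The translation of one insn is driven entirely by table data: the
   help_in1/help_in2 hooks load operands, help_prep allocates output
   temporaries, help_op emits the operation proper, and help_wout and
   help_cout write the result back and compute the condition code.
   translate_one below wires these stages together.  */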
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->pc);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif
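
    /* A number of insns carry static operand constraints, collected in
       the SPEC_* bits of insn->spec: e.g. even/odd register pairs for
       64-bit multiply and divide, or 128-bit FP register designations
       that must leave room for the implicit second register of the pair.
       Violations raise a specification exception before any operands are
       touched.  */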
    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers.  */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Save off cc.  */
        update_cc_op(s);

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    int num_insns, max_insns;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    dc.ex_value = tb->cs_base;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
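
    /* Translate one insn at a time, stopping at a page boundary (a TB is
       kept within one guest page so that page-level invalidation stays
       simple), on any non-NO_EXIT status, or when the insn/op budget for
       this TB is exhausted.  */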
    do {
        tcg_gen_insn_start(dc.pc, dc.cc_op);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
            status = EXIT_PC_STALE;
            do_debug = true;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc.pc += 2;
            break;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        status = translate_one(env, &dc);

        /* If we reach a page boundary, are single stepping,
           or exhaust the instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || cs->singlestep_enabled
                || singlestep
                || dc.ex_value)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* FALLTHRU */
    case EXIT_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(&dc)) {
            tcg_gen_exit_tb(0);
        } else {
            tcg_gen_lookup_and_goto_ptr(psw_addr);
        }
        break;
    default:
        g_assert_not_reached();
    }

    gen_tb_end(tb, num_insns);

    tb->size = dc.pc - pc_start;
    tb->icount = num_insns;

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        if (unlikely(dc.ex_value)) {
            /* ??? Unfortunately log_target_disas can't use host memory.  */
            qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
        } else {
            qemu_log("IN: %s\n", lookup_symbol(pc_start));
            log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
            qemu_log("\n");
        }
        qemu_log_unlock();
    }
#endif
}
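
/* data[] below mirrors the arguments of the tcg_gen_insn_start() call in
   the translation loop above: data[0] is the guest PC and data[1] the
   cc_op in effect at that insn, which is how the state is reconstructed
   after a fault in the middle of a TB.  */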
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];
    env->psw.addr = data[0];
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}