2 * HPPA emulation cpu translation for qemu.
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
34 typedef struct DisasCond {
41 typedef struct DisasContext {
42 struct TranslationBlock *tb;
56 bool singlestep_enabled;
60 /* Return values from translate_one, indicating the state of the TB.
61 Note that zero indicates that we are not exiting the TB. */
66 /* We have emitted one or more goto_tb. No fixup required. */
69 /* We are not using a goto_tb (for whatever reason), but have updated
70 the iaq, so don't do it again on exit. */
73 /* We are exiting the TB, but have neither emitted a goto_tb, nor
74 updated the iaq for the next instruction to be executed. */
77 /* We are ending the TB with a noreturn function call, e.g. longjmp.
78 No following code will be executed. */
82 typedef struct DisasInsn {
84 ExitStatus (*trans)(DisasContext *ctx, uint32_t insn,
85 const struct DisasInsn *f);
87 void (*f_ttt)(TCGv, TCGv, TCGv);
88 void (*f_weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
89 void (*f_dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
90 void (*f_wew)(TCGv_i32, TCGv_env, TCGv_i32);
91 void (*f_ded)(TCGv_i64, TCGv_env, TCGv_i64);
92 void (*f_wed)(TCGv_i32, TCGv_env, TCGv_i64);
93 void (*f_dew)(TCGv_i64, TCGv_env, TCGv_i32);
97 /* global register indexes */
98 static TCGv_env cpu_env;
99 static TCGv cpu_gr[32];
100 static TCGv cpu_iaoq_f;
101 static TCGv cpu_iaoq_b;
103 static TCGv cpu_psw_n;
104 static TCGv cpu_psw_v;
105 static TCGv cpu_psw_cb;
106 static TCGv cpu_psw_cb_msb;
107 static TCGv cpu_cr26;
108 static TCGv cpu_cr27;
110 #include "exec/gen-icount.h"
112 void hppa_translate_init(void)
114 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
116 typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
117 static const GlobalVar vars[] = {
131 /* Use the symbolic register names that match the disassembler. */
132 static const char gr_names[32][4] = {
133 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
134 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
135 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
136 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
139 static bool done_init = false;
147 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
148 tcg_ctx.tcg_env = cpu_env;
150 TCGV_UNUSED(cpu_gr[0]);
151 for (i = 1; i < 32; i++) {
152 cpu_gr[i] = tcg_global_mem_new(cpu_env,
153 offsetof(CPUHPPAState, gr[i]),
157 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
158 const GlobalVar *v = &vars[i];
159 *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
163 static DisasCond cond_make_f(void)
165 DisasCond r = { .c = TCG_COND_NEVER };
171 static DisasCond cond_make_n(void)
173 DisasCond r = { .c = TCG_COND_NE, .a0_is_n = true, .a1_is_0 = true };
179 static DisasCond cond_make_0(TCGCond c, TCGv a0)
181 DisasCond r = { .c = c, .a1_is_0 = true };
183 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
184 r.a0 = tcg_temp_new();
185 tcg_gen_mov_tl(r.a0, a0);
191 static DisasCond cond_make(TCGCond c, TCGv a0, TCGv a1)
193 DisasCond r = { .c = c };
195 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
196 r.a0 = tcg_temp_new();
197 tcg_gen_mov_tl(r.a0, a0);
198 r.a1 = tcg_temp_new();
199 tcg_gen_mov_tl(r.a1, a1);
204 static void cond_prep(DisasCond *cond)
207 cond->a1_is_0 = false;
208 cond->a1 = tcg_const_tl(0);
212 static void cond_free(DisasCond *cond)
216 if (!cond->a0_is_n) {
217 tcg_temp_free(cond->a0);
219 if (!cond->a1_is_0) {
220 tcg_temp_free(cond->a1);
222 cond->a0_is_n = false;
223 cond->a1_is_0 = false;
224 TCGV_UNUSED(cond->a0);
225 TCGV_UNUSED(cond->a1);
227 case TCG_COND_ALWAYS:
228 cond->c = TCG_COND_NEVER;
235 static TCGv get_temp(DisasContext *ctx)
237 unsigned i = ctx->ntemps++;
238 g_assert(i < ARRAY_SIZE(ctx->temps));
239 return ctx->temps[i] = tcg_temp_new();
242 static TCGv load_const(DisasContext *ctx, target_long v)
244 TCGv t = get_temp(ctx);
245 tcg_gen_movi_tl(t, v);
249 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
252 TCGv t = get_temp(ctx);
253 tcg_gen_movi_tl(t, 0);
260 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
262 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
263 return get_temp(ctx);
269 static void save_or_nullify(DisasContext *ctx, TCGv dest, TCGv t)
271 if (ctx->null_cond.c != TCG_COND_NEVER) {
272 cond_prep(&ctx->null_cond);
273 tcg_gen_movcond_tl(ctx->null_cond.c, dest, ctx->null_cond.a0,
274 ctx->null_cond.a1, dest, t);
276 tcg_gen_mov_tl(dest, t);
280 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv t)
283 save_or_nullify(ctx, cpu_gr[reg], t);
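/* Illustrative sketch (not from the original file): the typical writeback
   pattern used by the trans_* routines below, showing how dest_gpr and
   save_gpr cooperate with nullification.  The ADD is only an example op:

       TCGv t = dest_gpr(ctx, rt);     // a temp if this insn may be nullified
       tcg_gen_add_tl(t, load_gpr(ctx, r1), load_gpr(ctx, r2));
       save_gpr(ctx, rt, t);           // movcond keeps the old value if nullified

   When null_cond is TCG_COND_NEVER, save_or_nullify emits a plain move, so
   the unconditional case pays no extra cost.  */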
287 #ifdef HOST_WORDS_BIGENDIAN
295 static TCGv_i32 load_frw_i32(unsigned rt)
297 TCGv_i32 ret = tcg_temp_new_i32();
298 tcg_gen_ld_i32(ret, cpu_env,
299 offsetof(CPUHPPAState, fr[rt & 31])
300 + (rt & 32 ? LO_OFS : HI_OFS));
304 static TCGv_i32 load_frw0_i32(unsigned rt)
307 return tcg_const_i32(0);
309 return load_frw_i32(rt);
313 static TCGv_i64 load_frw0_i64(unsigned rt)
316 return tcg_const_i64(0);
318 TCGv_i64 ret = tcg_temp_new_i64();
319 tcg_gen_ld32u_i64(ret, cpu_env,
320 offsetof(CPUHPPAState, fr[rt & 31])
321 + (rt & 32 ? LO_OFS : HI_OFS));
326 static void save_frw_i32(unsigned rt, TCGv_i32 val)
328 tcg_gen_st_i32(val, cpu_env,
329 offsetof(CPUHPPAState, fr[rt & 31])
330 + (rt & 32 ? LO_OFS : HI_OFS));
336 static TCGv_i64 load_frd(unsigned rt)
338 TCGv_i64 ret = tcg_temp_new_i64();
339 tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
343 static TCGv_i64 load_frd0(unsigned rt)
346 return tcg_const_i64(0);
352 static void save_frd(unsigned rt, TCGv_i64 val)
354 tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
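/* Note on the layout used by the FP accessors above: CPUHPPAState.fr[] holds
   32 double-width registers, and the 32-bit accessors view each of them as a
   pair of single-width halves.  Bit 5 of the register number picks the half,
   while HI_OFS/LO_OFS (defined under HOST_WORDS_BIGENDIAN above, values not
   shown in this excerpt) compensate for host byte order so the same half is
   addressed on either kind of host.  For example, load_frw_i32(35) reads one
   32-bit half of fr[3].  */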
357 /* Skip over the implementation of an insn that has been nullified.
358 Use this when the insn is too complex for a conditional move. */
359 static void nullify_over(DisasContext *ctx)
361 if (ctx->null_cond.c != TCG_COND_NEVER) {
362 /* The always condition should have been handled in the main loop. */
363 assert(ctx->null_cond.c != TCG_COND_ALWAYS);
365 ctx->null_lab = gen_new_label();
366 cond_prep(&ctx->null_cond);
368 /* If we're using PSW[N], copy it to a temp because... */
369 if (ctx->null_cond.a0_is_n) {
370 ctx->null_cond.a0_is_n = false;
371 ctx->null_cond.a0 = tcg_temp_new();
372 tcg_gen_mov_tl(ctx->null_cond.a0, cpu_psw_n);
374 /* ... we clear it before branching over the implementation,
375 so that (1) it's clear after nullifying this insn and
376 (2) if this insn nullifies the next, PSW[N] is valid. */
377 if (ctx->psw_n_nonzero) {
378 ctx->psw_n_nonzero = false;
379 tcg_gen_movi_tl(cpu_psw_n, 0);
382 tcg_gen_brcond_tl(ctx->null_cond.c, ctx->null_cond.a0,
383 ctx->null_cond.a1, ctx->null_lab);
384 cond_free(&ctx->null_cond);
388 /* Save the current nullification state to PSW[N]. */
389 static void nullify_save(DisasContext *ctx)
391 if (ctx->null_cond.c == TCG_COND_NEVER) {
392 if (ctx->psw_n_nonzero) {
393 tcg_gen_movi_tl(cpu_psw_n, 0);
397 if (!ctx->null_cond.a0_is_n) {
398 cond_prep(&ctx->null_cond);
399 tcg_gen_setcond_tl(ctx->null_cond.c, cpu_psw_n,
400 ctx->null_cond.a0, ctx->null_cond.a1);
401 ctx->psw_n_nonzero = true;
403 cond_free(&ctx->null_cond);
406 /* Set PSW[N] to X. The intention is that this is used immediately
407 before a goto_tb/exit_tb, so that there is no fallthru path to other
408 code within the TB. Therefore we do not update psw_n_nonzero. */
409 static void nullify_set(DisasContext *ctx, bool x)
411 if (ctx->psw_n_nonzero || x) {
412 tcg_gen_movi_tl(cpu_psw_n, x);
416 /* Mark the end of an instruction that may have been nullified.
417 This is the pair to nullify_over. */
418 static ExitStatus nullify_end(DisasContext *ctx, ExitStatus status)
420 TCGLabel *null_lab = ctx->null_lab;
422 if (likely(null_lab == NULL)) {
423 /* The current insn wasn't conditional or handled the condition
424 applied to it without a branch, so the (new) setting of
425 NULL_COND can be applied directly to the next insn. */
428 ctx->null_lab = NULL;
430 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
431 /* The next instruction will be unconditional,
432 and NULL_COND already reflects that. */
433 gen_set_label(null_lab);
435 /* The insn that we just executed is itself nullifying the next
436 instruction. Store the condition in the PSW[N] global.
437 We asserted PSW[N] = 0 in nullify_over, so that after the
438 label we have the proper value in place. */
440 gen_set_label(null_lab);
441 ctx->null_cond = cond_make_n();
444 assert(status != EXIT_GOTO_TB && status != EXIT_IAQ_N_UPDATED);
445 if (status == EXIT_NORETURN) {
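/* A sketch of how the pair above is meant to be used (the trans_* routines
   below follow this shape; the body shown here is only illustrative):

       nullify_over(ctx);                    // branch over the body if nullified
       ... emit loads, the operation, helper calls ...
       return nullify_end(ctx, NO_EXIT);     // close the skip label, keep status

   Simple insns whose effect fits in a conditional move skip this pair and
   rely on dest_gpr/save_or_nullify instead.  */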
451 static void copy_iaoq_entry(TCGv dest, target_ulong ival, TCGv vval)
453 if (unlikely(ival == -1)) {
454 tcg_gen_mov_tl(dest, vval);
456 tcg_gen_movi_tl(dest, ival);
460 static inline target_ulong iaoq_dest(DisasContext *ctx, target_long disp)
462 return ctx->iaoq_f + disp + 8;
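/* iaoq_f/iaoq_b model the two-entry instruction address offset queue: the
   front entry is the insn currently executing and the back entry is the one
   that executes next (normally front + 4), which is how the branch delay
   slot is modelled.  PA branch displacements are relative to the branch
   address plus 8, i.e. the insn after the delay slot, hence the "+ 8" above;
   callers pass DISP already converted to bytes (e.g. assemble_12(insn) * 4).
   Example: a branch at 0x1000 with an assembled displacement of +16 words
   targets 0x1000 + 8 + 16 * 4 = 0x1048.  */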
465 static void gen_excp_1(int exception)
467 TCGv_i32 t = tcg_const_i32(exception);
468 gen_helper_excp(cpu_env, t);
469 tcg_temp_free_i32(t);
472 static ExitStatus gen_excp(DisasContext *ctx, int exception)
474 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
475 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
477 gen_excp_1(exception);
478 return EXIT_NORETURN;
481 static ExitStatus gen_illegal(DisasContext *ctx)
484 return nullify_end(ctx, gen_excp(ctx, EXCP_SIGILL));
487 static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
489 /* Suppress goto_tb in the case of single-stepping and IO. */
490 if ((ctx->tb->cflags & CF_LAST_IO) || ctx->singlestep_enabled) {
496 /* If the next insn is to be nullified, and it's on the same page,
497 and we're not attempting to set a breakpoint on it, then we can
498 totally skip the nullified insn. This avoids creating and
499 executing a TB that merely branches to the next TB. */
500 static bool use_nullify_skip(DisasContext *ctx)
502 return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
503 && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
506 static void gen_goto_tb(DisasContext *ctx, int which,
507 target_ulong f, target_ulong b)
509 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
510 tcg_gen_goto_tb(which);
511 tcg_gen_movi_tl(cpu_iaoq_f, f);
512 tcg_gen_movi_tl(cpu_iaoq_b, b);
513 tcg_gen_exit_tb((uintptr_t)ctx->tb + which);
515 copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
516 copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
517 if (ctx->singlestep_enabled) {
518 gen_excp_1(EXCP_DEBUG);
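/* gen_goto_tb follows the usual TCG direct-chaining convention: WHICH selects
   one of the two goto_tb slots a TB may use, and exit_tb((uintptr_t)tb + which)
   returns the TB pointer with the slot index in its low bits so the outer loop
   can patch the jump once the successor TB is known.  do_cbranch below uses
   slot 0 for the not-taken path and slot 1 for the taken path.  When either
   address is unknown at translation time (-1) or chaining is not allowed, the
   IAQ globals are written and the TB is left without chaining.  */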
525 /* PA has a habit of taking the LSB of a field and using that as the sign,
526 with the rest of the field becoming the least significant bits. */
527 static target_long low_sextract(uint32_t val, int pos, int len)
529 target_ulong x = -(target_ulong)extract32(val, pos, 1);
530 x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
534 static unsigned assemble_rt64(uint32_t insn)
536 unsigned r1 = extract32(insn, 6, 1);
537 unsigned r0 = extract32(insn, 0, 5);
541 static unsigned assemble_ra64(uint32_t insn)
543 unsigned r1 = extract32(insn, 7, 1);
544 unsigned r0 = extract32(insn, 21, 5);
548 static unsigned assemble_rb64(uint32_t insn)
550 unsigned r1 = extract32(insn, 12, 1);
551 unsigned r0 = extract32(insn, 16, 5);
555 static unsigned assemble_rc64(uint32_t insn)
557 unsigned r2 = extract32(insn, 8, 1);
558 unsigned r1 = extract32(insn, 13, 3);
559 unsigned r0 = extract32(insn, 9, 2);
560 return r2 * 32 + r1 * 4 + r0;
563 static target_long assemble_12(uint32_t insn)
565 target_ulong x = -(target_ulong)(insn & 1);
566 x = (x << 1) | extract32(insn, 2, 1);
567 x = (x << 10) | extract32(insn, 3, 10);
571 static target_long assemble_16(uint32_t insn)
573 /* Take the name from PA2.0, which produces a 16-bit number
574 only with wide mode; otherwise a 14-bit number. Since we don't
575 implement wide mode, this is always the 14-bit number. */
576 return low_sextract(insn, 0, 14);
579 static target_long assemble_16a(uint32_t insn)
581 /* Take the name from PA2.0, which produces a 14-bit shifted number
582 only with wide mode; otherwise a 12-bit shifted number. Since we
583 don't implement wide mode, this is always the 12-bit number. */
584 target_ulong x = -(target_ulong)(insn & 1);
585 x = (x << 11) | extract32(insn, 2, 11);
589 static target_long assemble_17(uint32_t insn)
591 target_ulong x = -(target_ulong)(insn & 1);
592 x = (x << 5) | extract32(insn, 16, 5);
593 x = (x << 1) | extract32(insn, 2, 1);
594 x = (x << 10) | extract32(insn, 3, 10);
598 static target_long assemble_21(uint32_t insn)
600 target_ulong x = -(target_ulong)(insn & 1);
601 x = (x << 11) | extract32(insn, 1, 11);
602 x = (x << 2) | extract32(insn, 14, 2);
603 x = (x << 5) | extract32(insn, 16, 5);
604 x = (x << 2) | extract32(insn, 12, 2);
608 static target_long assemble_22(uint32_t insn)
610 target_ulong x = -(target_ulong)(insn & 1);
611 x = (x << 10) | extract32(insn, 16, 10);
612 x = (x << 1) | extract32(insn, 2, 1);
613 x = (x << 10) | extract32(insn, 3, 10);
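/* A worked example of the scrambled-field decoding above (illustrative only):
   for assemble_12, take an insn whose bit 0 (the sign) is 1, bit 2 is 0, and
   whose bits 3..12 hold 0x001.  Then

       x = -1                      after the sign step
       x = (x << 1)  | 0  = -2
       x = (x << 10) | 1  = -2047

   giving a signed word displacement of -2047; the branch translators below
   (e.g. trans_cmpb) multiply this by 4 to obtain a byte displacement.  */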
617 /* The parisc documentation describes only the general interpretation of
618 the conditions, without describing their exact implementation. The
619 interpretations do not stand up well when considering ADD,C and SUB,B.
620 However, considering the Addition, Subtraction and Logical conditions
621 as a whole it would appear that these relations are similar to what
622 a traditional NZCV set of flags would produce. */
624 static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
630 case 0: /* Never / TR */
631 cond = cond_make_f();
633 case 1: /* = / <> (Z / !Z) */
634 cond = cond_make_0(TCG_COND_EQ, res);
636 case 2: /* < / >= (N / !N) */
637 cond = cond_make_0(TCG_COND_LT, res);
639 case 3: /* <= / > (N | Z / !N & !Z) */
640 cond = cond_make_0(TCG_COND_LE, res);
642 case 4: /* NUV / UV (!C / C) */
643 cond = cond_make_0(TCG_COND_EQ, cb_msb);
645 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
646 tmp = tcg_temp_new();
647 tcg_gen_neg_tl(tmp, cb_msb);
648 tcg_gen_and_tl(tmp, tmp, res);
649 cond = cond_make_0(TCG_COND_EQ, tmp);
652 case 6: /* SV / NSV (V / !V) */
653 cond = cond_make_0(TCG_COND_LT, sv);
655 case 7: /* OD / EV */
656 tmp = tcg_temp_new();
657 tcg_gen_andi_tl(tmp, res, 1);
658 cond = cond_make_0(TCG_COND_NE, tmp);
662 g_assert_not_reached();
665 cond.c = tcg_invert_cond(cond.c);
671 /* Similar, but for the special case of subtraction without borrow, we
672 can use the inputs directly. This can allow other computation to be
673 deleted as unused. */
675 static DisasCond do_sub_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2, TCGv sv)
681 cond = cond_make(TCG_COND_EQ, in1, in2);
684 cond = cond_make(TCG_COND_LT, in1, in2);
687 cond = cond_make(TCG_COND_LE, in1, in2);
689 case 4: /* << / >>= */
690 cond = cond_make(TCG_COND_LTU, in1, in2);
692 case 5: /* <<= / >> */
693 cond = cond_make(TCG_COND_LEU, in1, in2);
696 return do_cond(cf, res, sv, sv);
699 cond.c = tcg_invert_cond(cond.c);
705 /* Similar, but for logicals, where the carry and overflow bits are not
706 computed, and use of them is undefined. */
708 static DisasCond do_log_cond(unsigned cf, TCGv res)
711 case 4: case 5: case 6:
715 return do_cond(cf, res, res, res);
718 /* Similar, but for shift/extract/deposit conditions. */
720 static DisasCond do_sed_cond(unsigned orig, TCGv res)
724 /* Convert the compressed condition codes to standard.
725 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
726 4-7 are the reverse of 0-3. */
733 return do_log_cond(c * 2 + f, res);
736 /* Similar, but for unit conditions. */
738 static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
745 /* Since we want to test lots of carry-out bits all at once, do not
746 * do our normal thing and compute carry-in of bit B+1 since that
747 * leaves us with carry bits spread across two words.
750 tmp = tcg_temp_new();
751 tcg_gen_or_tl(cb, in1, in2);
752 tcg_gen_and_tl(tmp, in1, in2);
753 tcg_gen_andc_tl(cb, cb, res);
754 tcg_gen_or_tl(cb, cb, tmp);
759 case 0: /* never / TR */
760 case 1: /* undefined */
761 case 5: /* undefined */
762 cond = cond_make_f();
765 case 2: /* SBZ / NBZ */
766 /* See hasless(v,1) from
767 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
769 tmp = tcg_temp_new();
770 tcg_gen_subi_tl(tmp, res, 0x01010101u);
771 tcg_gen_andc_tl(tmp, tmp, res);
772 tcg_gen_andi_tl(tmp, tmp, 0x80808080u);
773 cond = cond_make_0(TCG_COND_NE, tmp);
777 case 3: /* SHZ / NHZ */
778 tmp = tcg_temp_new();
779 tcg_gen_subi_tl(tmp, res, 0x00010001u);
780 tcg_gen_andc_tl(tmp, tmp, res);
781 tcg_gen_andi_tl(tmp, tmp, 0x80008000u);
782 cond = cond_make_0(TCG_COND_NE, tmp);
786 case 4: /* SDC / NDC */
787 tcg_gen_andi_tl(cb, cb, 0x88888888u);
788 cond = cond_make_0(TCG_COND_NE, cb);
791 case 6: /* SBC / NBC */
792 tcg_gen_andi_tl(cb, cb, 0x80808080u);
793 cond = cond_make_0(TCG_COND_NE, cb);
796 case 7: /* SHC / NHC */
797 tcg_gen_andi_tl(cb, cb, 0x80008000u);
798 cond = cond_make_0(TCG_COND_NE, cb);
802 g_assert_not_reached();
808 cond.c = tcg_invert_cond(cond.c);
814 /* Compute signed overflow for addition. */
815 static TCGv do_add_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
817 TCGv sv = get_temp(ctx);
818 TCGv tmp = tcg_temp_new();
820 tcg_gen_xor_tl(sv, res, in1);
821 tcg_gen_xor_tl(tmp, in1, in2);
822 tcg_gen_andc_tl(sv, sv, tmp);
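/* The identity used above: for addition, signed overflow occurs exactly when
   the operands have the same sign and the result's sign differs, i.e. the
   sign bit of (res ^ in1) & ~(in1 ^ in2).  The result is left in SV's sign
   bit, which do_cond case 6 (SV/NSV) tests with TCG_COND_LT.  The subtraction
   variant below uses AND instead of ANDC because overflow there requires the
   operands to have opposite signs.  */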
828 /* Compute signed overflow for subtraction. */
829 static TCGv do_sub_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
831 TCGv sv = get_temp(ctx);
832 TCGv tmp = tcg_temp_new();
834 tcg_gen_xor_tl(sv, res, in1);
835 tcg_gen_xor_tl(tmp, in1, in2);
836 tcg_gen_and_tl(sv, sv, tmp);
842 static ExitStatus do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
843 unsigned shift, bool is_l, bool is_tsv, bool is_tc,
844 bool is_c, unsigned cf)
846 TCGv dest, cb, cb_msb, sv, tmp;
847 unsigned c = cf >> 1;
850 dest = tcg_temp_new();
856 tcg_gen_shli_tl(tmp, in1, shift);
860 if (!is_l || c == 4 || c == 5) {
861 TCGv zero = tcg_const_tl(0);
862 cb_msb = get_temp(ctx);
863 tcg_gen_add2_tl(dest, cb_msb, in1, zero, in2, zero);
865 tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
870 tcg_gen_xor_tl(cb, in1, in2);
871 tcg_gen_xor_tl(cb, cb, dest);
874 tcg_gen_add_tl(dest, in1, in2);
876 tcg_gen_add_tl(dest, dest, cpu_psw_cb_msb);
880 /* Compute signed overflow if required. */
882 if (is_tsv || c == 6) {
883 sv = do_add_sv(ctx, dest, in1, in2);
885 /* ??? Need to include overflow from shift. */
886 gen_helper_tsv(cpu_env, sv);
890 /* Emit any conditional trap before any writeback. */
891 cond = do_cond(cf, dest, cb_msb, sv);
894 tmp = tcg_temp_new();
895 tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
896 gen_helper_tcond(cpu_env, tmp);
900 /* Write back the result. */
902 save_or_nullify(ctx, cpu_psw_cb, cb);
903 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
905 save_gpr(ctx, rt, dest);
908 /* Install the new nullification. */
909 cond_free(&ctx->null_cond);
910 ctx->null_cond = cond;
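/* Note on the carry representation used by do_add above and do_sub below:
   cpu_psw_cb_msb holds just the carry out of the most significant bit,
   produced by the double-word add2 against zero high parts; conditions 4/5
   (NUV/UV, ZNV/VNZ) consume it directly.  cpu_psw_cb appears to cache a
   per-bit carry vector (in1 ^ in2 ^ result) rather than the architectural
   8-bit PSW[CB] field; the individual carry/borrow bits are recovered from
   it later by shifting and masking (see trans_dcor below).  */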
914 static ExitStatus do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
915 bool is_tsv, bool is_b, bool is_tc, unsigned cf)
917 TCGv dest, sv, cb, cb_msb, zero, tmp;
918 unsigned c = cf >> 1;
921 dest = tcg_temp_new();
923 cb_msb = tcg_temp_new();
925 zero = tcg_const_tl(0);
927 /* DEST,C = IN1 + ~IN2 + C. */
928 tcg_gen_not_tl(cb, in2);
929 tcg_gen_add2_tl(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
930 tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cb, zero);
931 tcg_gen_xor_tl(cb, cb, in1);
932 tcg_gen_xor_tl(cb, cb, dest);
934 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
935 operations by seeding the high word with 1 and subtracting. */
936 tcg_gen_movi_tl(cb_msb, 1);
937 tcg_gen_sub2_tl(dest, cb_msb, in1, cb_msb, in2, zero);
938 tcg_gen_eqv_tl(cb, in1, in2);
939 tcg_gen_xor_tl(cb, cb, dest);
943 /* Compute signed overflow if required. */
945 if (is_tsv || c == 6) {
946 sv = do_sub_sv(ctx, dest, in1, in2);
948 gen_helper_tsv(cpu_env, sv);
952 /* Compute the condition. We cannot use the special case for borrow. */
954 cond = do_sub_cond(cf, dest, in1, in2, sv);
956 cond = do_cond(cf, dest, cb_msb, sv);
959 /* Emit any conditional trap before any writeback. */
962 tmp = tcg_temp_new();
963 tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
964 gen_helper_tcond(cpu_env, tmp);
968 /* Write back the result. */
969 save_or_nullify(ctx, cpu_psw_cb, cb);
970 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
971 save_gpr(ctx, rt, dest);
974 /* Install the new nullification. */
975 cond_free(&ctx->null_cond);
976 ctx->null_cond = cond;
980 static ExitStatus do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
981 TCGv in2, unsigned cf)
986 dest = tcg_temp_new();
987 tcg_gen_sub_tl(dest, in1, in2);
989 /* Compute signed overflow if required. */
991 if ((cf >> 1) == 6) {
992 sv = do_sub_sv(ctx, dest, in1, in2);
995 /* Form the condition for the compare. */
996 cond = do_sub_cond(cf, dest, in1, in2, sv);
999 tcg_gen_movi_tl(dest, 0);
1000 save_gpr(ctx, rt, dest);
1001 tcg_temp_free(dest);
1003 /* Install the new nullification. */
1004 cond_free(&ctx->null_cond);
1005 ctx->null_cond = cond;
1009 static ExitStatus do_log(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
1010 unsigned cf, void (*fn)(TCGv, TCGv, TCGv))
1012 TCGv dest = dest_gpr(ctx, rt);
1014 /* Perform the operation, and writeback. */
1016 save_gpr(ctx, rt, dest);
1018 /* Install the new nullification. */
1019 cond_free(&ctx->null_cond);
1021 ctx->null_cond = do_log_cond(cf, dest);
1026 static ExitStatus do_unit(DisasContext *ctx, unsigned rt, TCGv in1,
1027 TCGv in2, unsigned cf, bool is_tc,
1028 void (*fn)(TCGv, TCGv, TCGv))
1034 dest = dest_gpr(ctx, rt);
1036 save_gpr(ctx, rt, dest);
1037 cond_free(&ctx->null_cond);
1039 dest = tcg_temp_new();
1042 cond = do_unit_cond(cf, dest, in1, in2);
1045 TCGv tmp = tcg_temp_new();
1047 tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
1048 gen_helper_tcond(cpu_env, tmp);
1051 save_gpr(ctx, rt, dest);
1053 cond_free(&ctx->null_cond);
1054 ctx->null_cond = cond;
1059 /* Emit a memory load. The modify parameter should be
1060 * < 0 for pre-modify,
1061 * > 0 for post-modify,
1062 * = 0 for no base register update.
1064 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1065 unsigned rx, int scale, target_long disp,
1066 int modify, TCGMemOp mop)
1070 /* Caller uses nullify_over/nullify_end. */
1071 assert(ctx->null_cond.c == TCG_COND_NEVER);
1073 addr = tcg_temp_new();
1074 base = load_gpr(ctx, rb);
1076 /* Note that RX is mutually exclusive with DISP. */
1078 tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1079 tcg_gen_add_tl(addr, addr, base);
1081 tcg_gen_addi_tl(addr, base, disp);
1085 tcg_gen_qemu_ld_i32(dest, addr, MMU_USER_IDX, mop);
1087 tcg_gen_qemu_ld_i32(dest, (modify < 0 ? addr : base),
1089 save_gpr(ctx, rb, addr);
1091 tcg_temp_free(addr);
1094 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1095 unsigned rx, int scale, target_long disp,
1096 int modify, TCGMemOp mop)
1100 /* Caller uses nullify_over/nullify_end. */
1101 assert(ctx->null_cond.c == TCG_COND_NEVER);
1103 addr = tcg_temp_new();
1104 base = load_gpr(ctx, rb);
1106 /* Note that RX is mutually exclusive with DISP. */
1108 tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1109 tcg_gen_add_tl(addr, addr, base);
1111 tcg_gen_addi_tl(addr, base, disp);
1115 tcg_gen_qemu_ld_i64(dest, addr, MMU_USER_IDX, mop);
1117 tcg_gen_qemu_ld_i64(dest, (modify < 0 ? addr : base),
1119 save_gpr(ctx, rb, addr);
1121 tcg_temp_free(addr);
1124 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1125 unsigned rx, int scale, target_long disp,
1126 int modify, TCGMemOp mop)
1130 /* Caller uses nullify_over/nullify_end. */
1131 assert(ctx->null_cond.c == TCG_COND_NEVER);
1133 addr = tcg_temp_new();
1134 base = load_gpr(ctx, rb);
1136 /* Note that RX is mutually exclusive with DISP. */
1138 tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1139 tcg_gen_add_tl(addr, addr, base);
1141 tcg_gen_addi_tl(addr, base, disp);
1144 tcg_gen_qemu_st_i32(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
1147 save_gpr(ctx, rb, addr);
1149 tcg_temp_free(addr);
1152 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1153 unsigned rx, int scale, target_long disp,
1154 int modify, TCGMemOp mop)
1158 /* Caller uses nullify_over/nullify_end. */
1159 assert(ctx->null_cond.c == TCG_COND_NEVER);
1161 addr = tcg_temp_new();
1162 base = load_gpr(ctx, rb);
1164 /* Note that RX is mutually exclusive with DISP. */
1166 tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1167 tcg_gen_add_tl(addr, addr, base);
1169 tcg_gen_addi_tl(addr, base, disp);
1172 tcg_gen_qemu_st_i64(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
1175 save_gpr(ctx, rb, addr);
1177 tcg_temp_free(addr);
1180 #if TARGET_LONG_BITS == 64
1181 #define do_load_tl do_load_64
1182 #define do_store_tl do_store_64
1184 #define do_load_tl do_load_32
1185 #define do_store_tl do_store_32
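/* do_load_tl/do_store_tl select the memory access width that matches
   target_ulong, so the GPR-sized wrappers below (do_load, do_store) behave
   the same whether the target is built with 32-bit or 64-bit registers.
   The modify convention from do_load_32 carries through: < 0 pre-modify,
   > 0 post-modify, 0 for no base register update.  */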
1188 static ExitStatus do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1189 unsigned rx, int scale, target_long disp,
1190 int modify, TCGMemOp mop)
1197 /* No base register update. */
1198 dest = dest_gpr(ctx, rt);
1200 /* Make sure if RT == RB, we see the result of the load. */
1201 dest = get_temp(ctx);
1203 do_load_tl(ctx, dest, rb, rx, scale, disp, modify, mop);
1204 save_gpr(ctx, rt, dest);
1206 return nullify_end(ctx, NO_EXIT);
1209 static ExitStatus do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1210 unsigned rx, int scale, target_long disp,
1217 tmp = tcg_temp_new_i32();
1218 do_load_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1219 save_frw_i32(rt, tmp);
1220 tcg_temp_free_i32(tmp);
1223 gen_helper_loaded_fr0(cpu_env);
1226 return nullify_end(ctx, NO_EXIT);
1229 static ExitStatus do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1230 unsigned rx, int scale, target_long disp,
1237 tmp = tcg_temp_new_i64();
1238 do_load_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1240 tcg_temp_free_i64(tmp);
1243 gen_helper_loaded_fr0(cpu_env);
1246 return nullify_end(ctx, NO_EXIT);
1249 static ExitStatus do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1250 target_long disp, int modify, TCGMemOp mop)
1253 do_store_tl(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, modify, mop);
1254 return nullify_end(ctx, NO_EXIT);
1257 static ExitStatus do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1258 unsigned rx, int scale, target_long disp,
1265 tmp = load_frw_i32(rt);
1266 do_store_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1267 tcg_temp_free_i32(tmp);
1269 return nullify_end(ctx, NO_EXIT);
1272 static ExitStatus do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1273 unsigned rx, int scale, target_long disp,
1281 do_store_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1282 tcg_temp_free_i64(tmp);
1284 return nullify_end(ctx, NO_EXIT);
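/* Naming convention for the FP wrappers that follow (and for the f_* callback
   fields in DisasInsn above): each letter encodes an operand: 'w' a 32-bit
   value (TCGv_i32), 'd' a 64-bit value (TCGv_i64), 'e' the cpu_env pointer.
   So do_fop_weww wraps a helper of type void(TCGv_i32, TCGv_env, TCGv_i32,
   TCGv_i32), loading its inputs with load_frw0_i32 and storing the result
   with save_frw_i32.  A decode-table entry elsewhere in the file presumably
   binds a concrete helper through the matching union field (e.g. a
   single-precision add via .f_weww); those entries are not shown in this
   excerpt.  */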
1287 static ExitStatus do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1288 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1293 tmp = load_frw0_i32(ra);
1295 func(tmp, cpu_env, tmp);
1297 save_frw_i32(rt, tmp);
1298 tcg_temp_free_i32(tmp);
1299 return nullify_end(ctx, NO_EXIT);
1302 static ExitStatus do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1303 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1310 dst = tcg_temp_new_i32();
1312 func(dst, cpu_env, src);
1314 tcg_temp_free_i64(src);
1315 save_frw_i32(rt, dst);
1316 tcg_temp_free_i32(dst);
1317 return nullify_end(ctx, NO_EXIT);
1320 static ExitStatus do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1321 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1326 tmp = load_frd0(ra);
1328 func(tmp, cpu_env, tmp);
1331 tcg_temp_free_i64(tmp);
1332 return nullify_end(ctx, NO_EXIT);
1335 static ExitStatus do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1336 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1342 src = load_frw0_i32(ra);
1343 dst = tcg_temp_new_i64();
1345 func(dst, cpu_env, src);
1347 tcg_temp_free_i32(src);
1349 tcg_temp_free_i64(dst);
1350 return nullify_end(ctx, NO_EXIT);
1353 static ExitStatus do_fop_weww(DisasContext *ctx, unsigned rt,
1354 unsigned ra, unsigned rb,
1355 void (*func)(TCGv_i32, TCGv_env,
1356 TCGv_i32, TCGv_i32))
1361 a = load_frw0_i32(ra);
1362 b = load_frw0_i32(rb);
1364 func(a, cpu_env, a, b);
1366 tcg_temp_free_i32(b);
1367 save_frw_i32(rt, a);
1368 tcg_temp_free_i32(a);
1369 return nullify_end(ctx, NO_EXIT);
1372 static ExitStatus do_fop_dedd(DisasContext *ctx, unsigned rt,
1373 unsigned ra, unsigned rb,
1374 void (*func)(TCGv_i64, TCGv_env,
1375 TCGv_i64, TCGv_i64))
1383 func(a, cpu_env, a, b);
1385 tcg_temp_free_i64(b);
1387 tcg_temp_free_i64(a);
1388 return nullify_end(ctx, NO_EXIT);
1391 /* Emit an unconditional branch to a direct target, which may or may not
1392 have already had nullification handled. */
1393 static ExitStatus do_dbranch(DisasContext *ctx, target_ulong dest,
1394 unsigned link, bool is_n)
1396 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1398 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1402 ctx->null_cond.c = TCG_COND_ALWAYS;
1409 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1412 if (is_n && use_nullify_skip(ctx)) {
1413 nullify_set(ctx, 0);
1414 gen_goto_tb(ctx, 0, dest, dest + 4);
1416 nullify_set(ctx, is_n);
1417 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1420 nullify_end(ctx, NO_EXIT);
1422 nullify_set(ctx, 0);
1423 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1424 return EXIT_GOTO_TB;
1428 /* Emit a conditional branch to a direct target. If the branch itself
1429 is nullified, we should have already used nullify_over. */
1430 static ExitStatus do_cbranch(DisasContext *ctx, target_long disp, bool is_n,
1433 target_ulong dest = iaoq_dest(ctx, disp);
1434 TCGLabel *taken = NULL;
1435 TCGCond c = cond->c;
1438 assert(ctx->null_cond.c == TCG_COND_NEVER);
1440 /* Handle TRUE and NEVER as direct branches. */
1441 if (c == TCG_COND_ALWAYS) {
1442 return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1444 if (c == TCG_COND_NEVER) {
1445 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1448 taken = gen_new_label();
1450 tcg_gen_brcond_tl(c, cond->a0, cond->a1, taken);
1453 /* Not taken: Condition not satisfied; nullify on backward branches. */
1454 n = is_n && disp < 0;
1455 if (n && use_nullify_skip(ctx)) {
1456 nullify_set(ctx, 0);
1457 gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1459 if (!n && ctx->null_lab) {
1460 gen_set_label(ctx->null_lab);
1461 ctx->null_lab = NULL;
1463 nullify_set(ctx, n);
1464 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1467 gen_set_label(taken);
1469 /* Taken: Condition satisfied; nullify on forward branches. */
1470 n = is_n && disp >= 0;
1471 if (n && use_nullify_skip(ctx)) {
1472 nullify_set(ctx, 0);
1473 gen_goto_tb(ctx, 1, dest, dest + 4);
1475 nullify_set(ctx, n);
1476 gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1479 /* Not taken: the branch itself was nullified. */
1480 if (ctx->null_lab) {
1481 gen_set_label(ctx->null_lab);
1482 ctx->null_lab = NULL;
1483 return EXIT_IAQ_N_STALE;
1485 return EXIT_GOTO_TB;
1489 /* Emit an unconditional branch to an indirect target. This handles
1490 nullification of the branch itself. */
1491 static ExitStatus do_ibranch(DisasContext *ctx, TCGv dest,
1492 unsigned link, bool is_n)
1494 TCGv a0, a1, next, tmp;
1497 assert(ctx->null_lab == NULL);
1499 if (ctx->null_cond.c == TCG_COND_NEVER) {
1501 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1503 next = get_temp(ctx);
1504 tcg_gen_mov_tl(next, dest);
1506 ctx->iaoq_n_var = next;
1508 ctx->null_cond.c = TCG_COND_ALWAYS;
1510 } else if (is_n && use_nullify_skip(ctx)) {
1511 /* The (conditional) branch, B, nullifies the next insn, N,
1512 and we're allowed to skip execution of N (no single-step or
1513 tracepoint in effect). Since the exit_tb that we must use
1514 for the indirect branch consumes no special resources, we
1515 can (conditionally) skip B and continue execution. */
1516 /* The use_nullify_skip test implies we have a known control path. */
1517 tcg_debug_assert(ctx->iaoq_b != -1);
1518 tcg_debug_assert(ctx->iaoq_n != -1);
1520 /* We do have to handle the non-local temporary, DEST, before
1521 branching. Since IAOQ_F is not really live at this point, we
1522 can simply store DEST optimistically. Similarly with IAOQ_B. */
1523 tcg_gen_mov_tl(cpu_iaoq_f, dest);
1524 tcg_gen_addi_tl(cpu_iaoq_b, dest, 4);
1528 tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
1531 return nullify_end(ctx, NO_EXIT);
1533 cond_prep(&ctx->null_cond);
1534 c = ctx->null_cond.c;
1535 a0 = ctx->null_cond.a0;
1536 a1 = ctx->null_cond.a1;
1538 tmp = tcg_temp_new();
1539 next = get_temp(ctx);
1541 copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1542 tcg_gen_movcond_tl(c, next, a0, a1, tmp, dest);
1544 ctx->iaoq_n_var = next;
1547 tcg_gen_movcond_tl(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1551 /* The branch nullifies the next insn, which means the state of N
1552 after the branch is the inverse of the state of N that applied
1554 tcg_gen_setcond_tl(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1555 cond_free(&ctx->null_cond);
1556 ctx->null_cond = cond_make_n();
1557 ctx->psw_n_nonzero = true;
1559 cond_free(&ctx->null_cond);
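/* Summary of the three branch emitters above: do_dbranch handles direct
   targets known at translation time, do_cbranch layers a condition on top of
   that (emitting both the taken and not-taken goto_tb paths), and do_ibranch
   handles register-indirect targets, folding the branch's own nullification
   into a movcond between the fall-through address and DEST.  When a link
   register is requested, do_dbranch and do_ibranch write it with the address
   of the insn following the delay slot via copy_iaoq_entry.  */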
1566 /* On Linux, page zero is normally marked execute only + gateway.
1567 Therefore normal read or write is supposed to fail, but specific
1568 offsets have kernel code mapped to raise permissions to implement
1569 system calls. Handling this via an explicit check here, rather
1570 in than the "be disp(sr2,r0)" instruction that probably sent us
1571 here, is the easiest way to handle the branch delay slot on the
1572 aforementioned BE. */
1573 static ExitStatus do_page_zero(DisasContext *ctx)
1575 /* If by some means we get here with PSW[N]=1, that implies that
1576 the B,GATE instruction would be skipped, and we'd fault on the
1577 next insn within the privileged page. */
1578 switch (ctx->null_cond.c) {
1579 case TCG_COND_NEVER:
1581 case TCG_COND_ALWAYS:
1582 tcg_gen_movi_tl(cpu_psw_n, 0);
1585 /* Since this is always the first (and only) insn within the
1586 TB, we should know the state of PSW[N] from TB->FLAGS. */
1587 g_assert_not_reached();
1590 /* Check that we didn't arrive here via some means that allowed
1591 non-sequential instruction execution. Normally the PSW[B] bit
1592 detects this by disallowing the B,GATE instruction to execute
1593 under such conditions. */
1594 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1598 switch (ctx->iaoq_f) {
1599 case 0x00: /* Null pointer call */
1600 gen_excp_1(EXCP_SIGSEGV);
1601 return EXIT_NORETURN;
1603 case 0xb0: /* LWS */
1604 gen_excp_1(EXCP_SYSCALL_LWS);
1605 return EXIT_NORETURN;
1607 case 0xe0: /* SET_THREAD_POINTER */
1608 tcg_gen_mov_tl(cpu_cr27, cpu_gr[26]);
1609 tcg_gen_mov_tl(cpu_iaoq_f, cpu_gr[31]);
1610 tcg_gen_addi_tl(cpu_iaoq_b, cpu_iaoq_f, 4);
1611 return EXIT_IAQ_N_UPDATED;
1613 case 0x100: /* SYSCALL */
1614 gen_excp_1(EXCP_SYSCALL);
1615 return EXIT_NORETURN;
1619 gen_excp_1(EXCP_SIGILL);
1620 return EXIT_NORETURN;
1624 static ExitStatus trans_nop(DisasContext *ctx, uint32_t insn,
1625 const DisasInsn *di)
1627 cond_free(&ctx->null_cond);
1631 static ExitStatus trans_break(DisasContext *ctx, uint32_t insn,
1632 const DisasInsn *di)
1635 return nullify_end(ctx, gen_excp(ctx, EXCP_DEBUG));
1638 static ExitStatus trans_sync(DisasContext *ctx, uint32_t insn,
1639 const DisasInsn *di)
1641 /* No point in nullifying the memory barrier. */
1642 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1644 cond_free(&ctx->null_cond);
1648 static ExitStatus trans_mfia(DisasContext *ctx, uint32_t insn,
1649 const DisasInsn *di)
1651 unsigned rt = extract32(insn, 0, 5);
1652 TCGv tmp = dest_gpr(ctx, rt);
1653 tcg_gen_movi_tl(tmp, ctx->iaoq_f);
1654 save_gpr(ctx, rt, tmp);
1656 cond_free(&ctx->null_cond);
1660 static ExitStatus trans_mfsp(DisasContext *ctx, uint32_t insn,
1661 const DisasInsn *di)
1663 unsigned rt = extract32(insn, 0, 5);
1664 TCGv tmp = dest_gpr(ctx, rt);
1666 /* ??? We don't implement space registers. */
1667 tcg_gen_movi_tl(tmp, 0);
1668 save_gpr(ctx, rt, tmp);
1670 cond_free(&ctx->null_cond);
1674 static ExitStatus trans_mfctl(DisasContext *ctx, uint32_t insn,
1675 const DisasInsn *di)
1677 unsigned rt = extract32(insn, 0, 5);
1678 unsigned ctl = extract32(insn, 21, 5);
1683 #ifdef TARGET_HPPA64
1684 if (extract32(insn, 14, 1) == 0) {
1685 /* MFSAR without ,W masks low 5 bits. */
1686 tmp = dest_gpr(ctx, rt);
1687 tcg_gen_andi_tl(tmp, cpu_sar, 31);
1688 save_gpr(ctx, rt, tmp);
1692 save_gpr(ctx, rt, cpu_sar);
1694 case 16: /* Interval Timer */
1695 tmp = dest_gpr(ctx, rt);
1696 tcg_gen_movi_tl(tmp, 0); /* FIXME */
1697 save_gpr(ctx, rt, tmp);
1700 save_gpr(ctx, rt, cpu_cr26);
1703 save_gpr(ctx, rt, cpu_cr27);
1706 /* All other control registers are privileged. */
1707 return gen_illegal(ctx);
1710 cond_free(&ctx->null_cond);
1714 static ExitStatus trans_mtctl(DisasContext *ctx, uint32_t insn,
1715 const DisasInsn *di)
1717 unsigned rin = extract32(insn, 16, 5);
1718 unsigned ctl = extract32(insn, 21, 5);
1721 if (ctl == 11) { /* SAR */
1722 tmp = tcg_temp_new();
1723 tcg_gen_andi_tl(tmp, load_gpr(ctx, rin), TARGET_LONG_BITS - 1);
1724 save_or_nullify(ctx, cpu_sar, tmp);
1727 /* All other control registers are privileged or read-only. */
1728 return gen_illegal(ctx);
1731 cond_free(&ctx->null_cond);
1735 static ExitStatus trans_mtsarcm(DisasContext *ctx, uint32_t insn,
1736 const DisasInsn *di)
1738 unsigned rin = extract32(insn, 16, 5);
1739 TCGv tmp = tcg_temp_new();
1741 tcg_gen_not_tl(tmp, load_gpr(ctx, rin));
1742 tcg_gen_andi_tl(tmp, tmp, TARGET_LONG_BITS - 1);
1743 save_or_nullify(ctx, cpu_sar, tmp);
1746 cond_free(&ctx->null_cond);
1750 static ExitStatus trans_ldsid(DisasContext *ctx, uint32_t insn,
1751 const DisasInsn *di)
1753 unsigned rt = extract32(insn, 0, 5);
1754 TCGv dest = dest_gpr(ctx, rt);
1756 /* Since we don't implement space registers, this returns zero. */
1757 tcg_gen_movi_tl(dest, 0);
1758 save_gpr(ctx, rt, dest);
1760 cond_free(&ctx->null_cond);
1764 static const DisasInsn table_system[] = {
1765 { 0x00000000u, 0xfc001fe0u, trans_break },
1766 /* We don't implement space registers, so MTSP is a nop. */
1767 { 0x00001820u, 0xffe01fffu, trans_nop },
1768 { 0x00001840u, 0xfc00ffffu, trans_mtctl },
1769 { 0x016018c0u, 0xffe0ffffu, trans_mtsarcm },
1770 { 0x000014a0u, 0xffffffe0u, trans_mfia },
1771 { 0x000004a0u, 0xffff1fe0u, trans_mfsp },
1772 { 0x000008a0u, 0xfc1fffe0u, trans_mfctl },
1773 { 0x00000400u, 0xffffffffu, trans_sync },
1774 { 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
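/* The decode tables in this file pair a 32-bit match value with a mask: an
   instruction selects an entry when (insn & mask) equals the match value.
   A sketch of the lookup, assuming the dispatcher (defined elsewhere in this
   file, not shown in this excerpt) takes the first hit; the match/mask field
   names below are placeholders, since the data members of DisasInsn are not
   visible above:

       for (i = 0; i < n; ++i) {
           if ((insn & table[i].mask) == table[i].match) {
               return table[i].trans(ctx, insn, &table[i]);
           }
       }
       return gen_illegal(ctx);

   This is why entries with more specific masks (e.g. the OR-as-NOP and COPY
   forms in table_arith_log below) are listed ahead of the generic pattern
   they specialize.  */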
1777 static ExitStatus trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
1778 const DisasInsn *di)
1780 unsigned rb = extract32(insn, 21, 5);
1781 unsigned rx = extract32(insn, 16, 5);
1782 TCGv dest = dest_gpr(ctx, rb);
1783 TCGv src1 = load_gpr(ctx, rb);
1784 TCGv src2 = load_gpr(ctx, rx);
1786 /* The only thing we need to do is the base register modification. */
1787 tcg_gen_add_tl(dest, src1, src2);
1788 save_gpr(ctx, rb, dest);
1790 cond_free(&ctx->null_cond);
1794 static ExitStatus trans_probe(DisasContext *ctx, uint32_t insn,
1795 const DisasInsn *di)
1797 unsigned rt = extract32(insn, 0, 5);
1798 unsigned rb = extract32(insn, 21, 5);
1799 unsigned is_write = extract32(insn, 6, 1);
1804 /* ??? Do something with priv level operand. */
1805 dest = dest_gpr(ctx, rt);
1807 gen_helper_probe_w(dest, load_gpr(ctx, rb));
1809 gen_helper_probe_r(dest, load_gpr(ctx, rb));
1811 save_gpr(ctx, rt, dest);
1812 return nullify_end(ctx, NO_EXIT);
1815 static const DisasInsn table_mem_mgmt[] = {
1816 { 0x04003280u, 0xfc003fffu, trans_nop }, /* fdc, disp */
1817 { 0x04001280u, 0xfc003fffu, trans_nop }, /* fdc, index */
1818 { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
1819 { 0x040012c0u, 0xfc003fffu, trans_nop }, /* fdce */
1820 { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
1821 { 0x04000280u, 0xfc001fffu, trans_nop }, /* fic 0a */
1822 { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
1823 { 0x040013c0u, 0xfc003fffu, trans_nop }, /* fic 4f */
1824 { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
1825 { 0x040002c0u, 0xfc001fffu, trans_nop }, /* fice */
1826 { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
1827 { 0x04002700u, 0xfc003fffu, trans_nop }, /* pdc */
1828 { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
1829 { 0x04001180u, 0xfc003fa0u, trans_probe }, /* probe */
1830 { 0x04003180u, 0xfc003fa0u, trans_probe }, /* probei */
1833 static ExitStatus trans_add(DisasContext *ctx, uint32_t insn,
1834 const DisasInsn *di)
1836 unsigned r2 = extract32(insn, 21, 5);
1837 unsigned r1 = extract32(insn, 16, 5);
1838 unsigned cf = extract32(insn, 12, 4);
1839 unsigned ext = extract32(insn, 8, 4);
1840 unsigned shift = extract32(insn, 6, 2);
1841 unsigned rt = extract32(insn, 0, 5);
1842 TCGv tcg_r1, tcg_r2;
1846 bool is_tsv = false;
1850 case 0x6: /* ADD, SHLADD */
1852 case 0xa: /* ADD,L, SHLADD,L */
1855 case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
1858 case 0x7: /* ADD,C */
1861 case 0xf: /* ADD,C,TSV */
1862 is_c = is_tsv = true;
1865 return gen_illegal(ctx);
1871 tcg_r1 = load_gpr(ctx, r1);
1872 tcg_r2 = load_gpr(ctx, r2);
1873 ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
1874 return nullify_end(ctx, ret);
1877 static ExitStatus trans_sub(DisasContext *ctx, uint32_t insn,
1878 const DisasInsn *di)
1880 unsigned r2 = extract32(insn, 21, 5);
1881 unsigned r1 = extract32(insn, 16, 5);
1882 unsigned cf = extract32(insn, 12, 4);
1883 unsigned ext = extract32(insn, 6, 6);
1884 unsigned rt = extract32(insn, 0, 5);
1885 TCGv tcg_r1, tcg_r2;
1888 bool is_tsv = false;
1892 case 0x10: /* SUB */
1894 case 0x30: /* SUB,TSV */
1897 case 0x14: /* SUB,B */
1900 case 0x34: /* SUB,B,TSV */
1901 is_b = is_tsv = true;
1903 case 0x13: /* SUB,TC */
1906 case 0x33: /* SUB,TSV,TC */
1907 is_tc = is_tsv = true;
1910 return gen_illegal(ctx);
1916 tcg_r1 = load_gpr(ctx, r1);
1917 tcg_r2 = load_gpr(ctx, r2);
1918 ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
1919 return nullify_end(ctx, ret);
1922 static ExitStatus trans_log(DisasContext *ctx, uint32_t insn,
1923 const DisasInsn *di)
1925 unsigned r2 = extract32(insn, 21, 5);
1926 unsigned r1 = extract32(insn, 16, 5);
1927 unsigned cf = extract32(insn, 12, 4);
1928 unsigned rt = extract32(insn, 0, 5);
1929 TCGv tcg_r1, tcg_r2;
1935 tcg_r1 = load_gpr(ctx, r1);
1936 tcg_r2 = load_gpr(ctx, r2);
1937 ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f_ttt);
1938 return nullify_end(ctx, ret);
1941 /* OR r,0,t -> COPY (according to gas) */
1942 static ExitStatus trans_copy(DisasContext *ctx, uint32_t insn,
1943 const DisasInsn *di)
1945 unsigned r1 = extract32(insn, 16, 5);
1946 unsigned rt = extract32(insn, 0, 5);
1949 TCGv dest = dest_gpr(ctx, rt);
1950 tcg_gen_movi_tl(dest, 0);
1951 save_gpr(ctx, rt, dest);
1953 save_gpr(ctx, rt, cpu_gr[r1]);
1955 cond_free(&ctx->null_cond);
1959 static ExitStatus trans_cmpclr(DisasContext *ctx, uint32_t insn,
1960 const DisasInsn *di)
1962 unsigned r2 = extract32(insn, 21, 5);
1963 unsigned r1 = extract32(insn, 16, 5);
1964 unsigned cf = extract32(insn, 12, 4);
1965 unsigned rt = extract32(insn, 0, 5);
1966 TCGv tcg_r1, tcg_r2;
1972 tcg_r1 = load_gpr(ctx, r1);
1973 tcg_r2 = load_gpr(ctx, r2);
1974 ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
1975 return nullify_end(ctx, ret);
1978 static ExitStatus trans_uxor(DisasContext *ctx, uint32_t insn,
1979 const DisasInsn *di)
1981 unsigned r2 = extract32(insn, 21, 5);
1982 unsigned r1 = extract32(insn, 16, 5);
1983 unsigned cf = extract32(insn, 12, 4);
1984 unsigned rt = extract32(insn, 0, 5);
1985 TCGv tcg_r1, tcg_r2;
1991 tcg_r1 = load_gpr(ctx, r1);
1992 tcg_r2 = load_gpr(ctx, r2);
1993 ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_tl);
1994 return nullify_end(ctx, ret);
1997 static ExitStatus trans_uaddcm(DisasContext *ctx, uint32_t insn,
1998 const DisasInsn *di)
2000 unsigned r2 = extract32(insn, 21, 5);
2001 unsigned r1 = extract32(insn, 16, 5);
2002 unsigned cf = extract32(insn, 12, 4);
2003 unsigned is_tc = extract32(insn, 6, 1);
2004 unsigned rt = extract32(insn, 0, 5);
2005 TCGv tcg_r1, tcg_r2, tmp;
2011 tcg_r1 = load_gpr(ctx, r1);
2012 tcg_r2 = load_gpr(ctx, r2);
2013 tmp = get_temp(ctx);
2014 tcg_gen_not_tl(tmp, tcg_r2);
2015 ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_tl);
2016 return nullify_end(ctx, ret);
2019 static ExitStatus trans_dcor(DisasContext *ctx, uint32_t insn,
2020 const DisasInsn *di)
2022 unsigned r2 = extract32(insn, 21, 5);
2023 unsigned cf = extract32(insn, 12, 4);
2024 unsigned is_i = extract32(insn, 6, 1);
2025 unsigned rt = extract32(insn, 0, 5);
2031 tmp = get_temp(ctx);
2032 tcg_gen_shri_tl(tmp, cpu_psw_cb, 3);
2034 tcg_gen_not_tl(tmp, tmp);
2036 tcg_gen_andi_tl(tmp, tmp, 0x11111111);
2037 tcg_gen_muli_tl(tmp, tmp, 6);
2038 ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
2039 is_i ? tcg_gen_add_tl : tcg_gen_sub_tl);
2041 return nullify_end(ctx, ret);
2044 static ExitStatus trans_ds(DisasContext *ctx, uint32_t insn,
2045 const DisasInsn *di)
2047 unsigned r2 = extract32(insn, 21, 5);
2048 unsigned r1 = extract32(insn, 16, 5);
2049 unsigned cf = extract32(insn, 12, 4);
2050 unsigned rt = extract32(insn, 0, 5);
2051 TCGv dest, add1, add2, addc, zero, in1, in2;
2055 in1 = load_gpr(ctx, r1);
2056 in2 = load_gpr(ctx, r2);
2058 add1 = tcg_temp_new();
2059 add2 = tcg_temp_new();
2060 addc = tcg_temp_new();
2061 dest = tcg_temp_new();
2062 zero = tcg_const_tl(0);
2064 /* Form R1 << 1 | PSW[CB]{8}. */
2065 tcg_gen_add_tl(add1, in1, in1);
2066 tcg_gen_add_tl(add1, add1, cpu_psw_cb_msb);
2068 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2069 carry{8} requires that we subtract via + ~R2 + 1, as described in
2070 the manual. By extracting and masking V, we can produce the
2071 proper inputs to the addition without movcond. */
2072 tcg_gen_sari_tl(addc, cpu_psw_v, TARGET_LONG_BITS - 1);
2073 tcg_gen_xor_tl(add2, in2, addc);
2074 tcg_gen_andi_tl(addc, addc, 1);
2075 /* ??? This is only correct for 32-bit. */
2076 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2077 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2079 tcg_temp_free(addc);
2080 tcg_temp_free(zero);
2082 /* Write back the result register. */
2083 save_gpr(ctx, rt, dest);
2085 /* Write back PSW[CB]. */
2086 tcg_gen_xor_tl(cpu_psw_cb, add1, add2);
2087 tcg_gen_xor_tl(cpu_psw_cb, cpu_psw_cb, dest);
2089 /* Write back PSW[V] for the division step. */
2090 tcg_gen_neg_tl(cpu_psw_v, cpu_psw_cb_msb);
2091 tcg_gen_xor_tl(cpu_psw_v, cpu_psw_v, in2);
2093 /* Install the new nullification. */
2098 /* ??? The lshift is supposed to contribute to overflow. */
2099 sv = do_add_sv(ctx, dest, add1, add2);
2101 ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
2104 tcg_temp_free(add1);
2105 tcg_temp_free(add2);
2106 tcg_temp_free(dest);
2108 return nullify_end(ctx, NO_EXIT);
2111 static const DisasInsn table_arith_log[] = {
2112 { 0x08000240u, 0xfc00ffffu, trans_nop }, /* or x,y,0 */
2113 { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
2114 { 0x08000000u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_andc_tl },
2115 { 0x08000200u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_and_tl },
2116 { 0x08000240u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_or_tl },
2117 { 0x08000280u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_xor_tl },
2118 { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
2119 { 0x08000380u, 0xfc000fe0u, trans_uxor },
2120 { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
2121 { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
2122 { 0x08000440u, 0xfc000fe0u, trans_ds },
2123 { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
2124 { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
2125 { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
2126 { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
2129 static ExitStatus trans_addi(DisasContext *ctx, uint32_t insn)
2131 target_long im = low_sextract(insn, 0, 11);
2132 unsigned e1 = extract32(insn, 11, 1);
2133 unsigned cf = extract32(insn, 12, 4);
2134 unsigned rt = extract32(insn, 16, 5);
2135 unsigned r2 = extract32(insn, 21, 5);
2136 unsigned o1 = extract32(insn, 26, 1);
2137 TCGv tcg_im, tcg_r2;
2144 tcg_im = load_const(ctx, im);
2145 tcg_r2 = load_gpr(ctx, r2);
2146 ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
2148 return nullify_end(ctx, ret);
2151 static ExitStatus trans_subi(DisasContext *ctx, uint32_t insn)
2153 target_long im = low_sextract(insn, 0, 11);
2154 unsigned e1 = extract32(insn, 11, 1);
2155 unsigned cf = extract32(insn, 12, 4);
2156 unsigned rt = extract32(insn, 16, 5);
2157 unsigned r2 = extract32(insn, 21, 5);
2158 TCGv tcg_im, tcg_r2;
2165 tcg_im = load_const(ctx, im);
2166 tcg_r2 = load_gpr(ctx, r2);
2167 ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
2169 return nullify_end(ctx, ret);
2172 static ExitStatus trans_cmpiclr(DisasContext *ctx, uint32_t insn)
2174 target_long im = low_sextract(insn, 0, 11);
2175 unsigned cf = extract32(insn, 12, 4);
2176 unsigned rt = extract32(insn, 16, 5);
2177 unsigned r2 = extract32(insn, 21, 5);
2178 TCGv tcg_im, tcg_r2;
2185 tcg_im = load_const(ctx, im);
2186 tcg_r2 = load_gpr(ctx, r2);
2187 ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
2189 return nullify_end(ctx, ret);
2192 static ExitStatus trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
2193 const DisasInsn *di)
2195 unsigned rt = extract32(insn, 0, 5);
2196 unsigned m = extract32(insn, 5, 1);
2197 unsigned sz = extract32(insn, 6, 2);
2198 unsigned a = extract32(insn, 13, 1);
2199 int disp = low_sextract(insn, 16, 5);
2200 unsigned rb = extract32(insn, 21, 5);
2201 int modify = (m ? (a ? -1 : 1) : 0);
2202 TCGMemOp mop = MO_TE | sz;
2204 return do_load(ctx, rt, rb, 0, 0, disp, modify, mop);
2207 static ExitStatus trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
2208 const DisasInsn *di)
2210 unsigned rt = extract32(insn, 0, 5);
2211 unsigned m = extract32(insn, 5, 1);
2212 unsigned sz = extract32(insn, 6, 2);
2213 unsigned u = extract32(insn, 13, 1);
2214 unsigned rx = extract32(insn, 16, 5);
2215 unsigned rb = extract32(insn, 21, 5);
2216 TCGMemOp mop = MO_TE | sz;
2218 return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, m, mop);
2221 static ExitStatus trans_st_idx_i(DisasContext *ctx, uint32_t insn,
2222 const DisasInsn *di)
2224 int disp = low_sextract(insn, 0, 5);
2225 unsigned m = extract32(insn, 5, 1);
2226 unsigned sz = extract32(insn, 6, 2);
2227 unsigned a = extract32(insn, 13, 1);
2228 unsigned rr = extract32(insn, 16, 5);
2229 unsigned rb = extract32(insn, 21, 5);
2230 int modify = (m ? (a ? -1 : 1) : 0);
2231 TCGMemOp mop = MO_TE | sz;
2233 return do_store(ctx, rr, rb, disp, modify, mop);
2236 static ExitStatus trans_ldcw(DisasContext *ctx, uint32_t insn,
2237 const DisasInsn *di)
2239 unsigned rt = extract32(insn, 0, 5);
2240 unsigned m = extract32(insn, 5, 1);
2241 unsigned i = extract32(insn, 12, 1);
2242 unsigned au = extract32(insn, 13, 1);
2243 unsigned rx = extract32(insn, 16, 5);
2244 unsigned rb = extract32(insn, 21, 5);
2245 TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
2246 TCGv zero, addr, base, dest;
2247 int modify, disp = 0, scale = 0;
2251 /* ??? Share more code with do_load and do_load_{32,64}. */
2254 modify = (m ? (au ? -1 : 1) : 0);
2255 disp = low_sextract(rx, 0, 5);
2260 scale = mop & MO_SIZE;
2264 /* Base register modification. Make sure if RT == RB, we see
2265 the result of the load. */
2266 dest = get_temp(ctx);
2268 dest = dest_gpr(ctx, rt);
2271 addr = tcg_temp_new();
2272 base = load_gpr(ctx, rb);
2274 tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
2275 tcg_gen_add_tl(addr, addr, base);
2277 tcg_gen_addi_tl(addr, base, disp);
2280 zero = tcg_const_tl(0);
2281 tcg_gen_atomic_xchg_tl(dest, (modify <= 0 ? addr : base),
2282 zero, MMU_USER_IDX, mop);
2284 save_gpr(ctx, rb, addr);
2286 save_gpr(ctx, rt, dest);
2288 return nullify_end(ctx, NO_EXIT);
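/* LDCW (load and clear word) is PA-RISC's semaphore primitive: it returns the
   old memory word and writes zero back as a single atomic operation, which is
   why it maps onto tcg_gen_atomic_xchg_tl with a zero source above.  The
   MO_ALIGN_16 in the memop reflects the architectural requirement that the
   operand be 16-byte aligned.  A typical guest spinlock initializes the word
   to a nonzero value and spins until ldcw returns nonzero.  */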
2291 static ExitStatus trans_stby(DisasContext *ctx, uint32_t insn,
2292 const DisasInsn *di)
2294 target_long disp = low_sextract(insn, 0, 5);
2295 unsigned m = extract32(insn, 5, 1);
2296 unsigned a = extract32(insn, 13, 1);
2297 unsigned rt = extract32(insn, 16, 5);
2298 unsigned rb = extract32(insn, 21, 5);
2303 addr = tcg_temp_new();
2304 if (m || disp == 0) {
2305 tcg_gen_mov_tl(addr, load_gpr(ctx, rb));
2307 tcg_gen_addi_tl(addr, load_gpr(ctx, rb), disp);
2309 val = load_gpr(ctx, rt);
2312 gen_helper_stby_e(cpu_env, addr, val);
2314 gen_helper_stby_b(cpu_env, addr, val);
2318 tcg_gen_addi_tl(addr, addr, disp);
2319 tcg_gen_andi_tl(addr, addr, ~3);
2320 save_gpr(ctx, rb, addr);
2322 tcg_temp_free(addr);
2324 return nullify_end(ctx, NO_EXIT);
2327 static const DisasInsn table_index_mem[] = {
2328 { 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
2329 { 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
2330 { 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
2331 { 0x0c0001c0u, 0xfc0003c0, trans_ldcw },
2332 { 0x0c001300u, 0xfc0013c0, trans_stby },
2335 static ExitStatus trans_ldil(DisasContext *ctx, uint32_t insn)
2337 unsigned rt = extract32(insn, 21, 5);
2338 target_long i = assemble_21(insn);
2339 TCGv tcg_rt = dest_gpr(ctx, rt);
2341 tcg_gen_movi_tl(tcg_rt, i);
2342 save_gpr(ctx, rt, tcg_rt);
2343 cond_free(&ctx->null_cond);
2348 static ExitStatus trans_addil(DisasContext *ctx, uint32_t insn)
2350 unsigned rt = extract32(insn, 21, 5);
2351 target_long i = assemble_21(insn);
2352 TCGv tcg_rt = load_gpr(ctx, rt);
2353 TCGv tcg_r1 = dest_gpr(ctx, 1);
2355 tcg_gen_addi_tl(tcg_r1, tcg_rt, i);
2356 save_gpr(ctx, 1, tcg_r1);
2357 cond_free(&ctx->null_cond);
2362 static ExitStatus trans_ldo(DisasContext *ctx, uint32_t insn)
2364 unsigned rb = extract32(insn, 21, 5);
2365 unsigned rt = extract32(insn, 16, 5);
2366 target_long i = assemble_16(insn);
2367 TCGv tcg_rt = dest_gpr(ctx, rt);
2369 /* Special case rb == 0, for the LDI pseudo-op.
2370 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
2372 tcg_gen_movi_tl(tcg_rt, i);
2374 tcg_gen_addi_tl(tcg_rt, cpu_gr[rb], i);
2376 save_gpr(ctx, rt, tcg_rt);
2377 cond_free(&ctx->null_cond);
2382 static ExitStatus trans_load(DisasContext *ctx, uint32_t insn,
2383 bool is_mod, TCGMemOp mop)
2385 unsigned rb = extract32(insn, 21, 5);
2386 unsigned rt = extract32(insn, 16, 5);
2387 target_long i = assemble_16(insn);
2389 return do_load(ctx, rt, rb, 0, 0, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
2392 static ExitStatus trans_load_w(DisasContext *ctx, uint32_t insn)
2394 unsigned rb = extract32(insn, 21, 5);
2395 unsigned rt = extract32(insn, 16, 5);
2396 target_long i = assemble_16a(insn);
2397 unsigned ext2 = extract32(insn, 1, 2);
2402 /* FLDW without modification. */
2403 return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
2405 /* LDW with modification. Note that the sign of I selects
2406 post-dec vs pre-inc. */
2407 return do_load(ctx, rt, rb, 0, 0, i, (i < 0 ? 1 : -1), MO_TEUL);
2409 return gen_illegal(ctx);
2413 static ExitStatus trans_fload_mod(DisasContext *ctx, uint32_t insn)
2415 target_long i = assemble_16a(insn);
2416 unsigned t1 = extract32(insn, 1, 1);
2417 unsigned a = extract32(insn, 2, 1);
2418 unsigned t0 = extract32(insn, 16, 5);
2419 unsigned rb = extract32(insn, 21, 5);
2421 /* FLDW with modification. */
2422 return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
2425 static ExitStatus trans_store(DisasContext *ctx, uint32_t insn,
2426 bool is_mod, TCGMemOp mop)
2428 unsigned rb = extract32(insn, 21, 5);
2429 unsigned rt = extract32(insn, 16, 5);
2430 target_long i = assemble_16(insn);
2432 return do_store(ctx, rt, rb, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
2435 static ExitStatus trans_store_w(DisasContext *ctx, uint32_t insn)
2437 unsigned rb = extract32(insn, 21, 5);
2438 unsigned rt = extract32(insn, 16, 5);
2439 target_long i = assemble_16a(insn);
2440 unsigned ext2 = extract32(insn, 1, 2);
2445 /* FSTW without modification. */
2446 return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
2448 /* STW with modification. */
2449 return do_store(ctx, rt, rb, i, (i < 0 ? 1 : -1), MO_TEUL);
2451 return gen_illegal(ctx);
2455 static ExitStatus trans_fstore_mod(DisasContext *ctx, uint32_t insn)
2457 target_long i = assemble_16a(insn);
2458 unsigned t1 = extract32(insn, 1, 1);
2459 unsigned a = extract32(insn, 2, 1);
2460 unsigned t0 = extract32(insn, 16, 5);
2461 unsigned rb = extract32(insn, 21, 5);
2463 /* FSTW with modification. */
2464 return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
2467 static ExitStatus trans_copr_w(DisasContext *ctx, uint32_t insn)
2469 unsigned t0 = extract32(insn, 0, 5);
2470 unsigned m = extract32(insn, 5, 1);
2471 unsigned t1 = extract32(insn, 6, 1);
2472 unsigned ext3 = extract32(insn, 7, 3);
2473 /* unsigned cc = extract32(insn, 10, 2); */
2474 unsigned i = extract32(insn, 12, 1);
2475 unsigned ua = extract32(insn, 13, 1);
2476 unsigned rx = extract32(insn, 16, 5);
2477 unsigned rb = extract32(insn, 21, 5);
2478 unsigned rt = t1 * 32 + t0;
2479 int modify = (m ? (ua ? -1 : 1) : 0);
2483 scale = (ua ? 2 : 0);
2487 disp = low_sextract(rx, 0, 5);
2490 modify = (m ? (ua ? -1 : 1) : 0);
2495 return do_floadw(ctx, rt, rb, rx, scale, disp, modify);
2497 return do_fstorew(ctx, rt, rb, rx, scale, disp, modify);
2499 return gen_illegal(ctx);
2502 static ExitStatus trans_copr_dw(DisasContext *ctx, uint32_t insn)
2504 unsigned rt = extract32(insn, 0, 5);
2505 unsigned m = extract32(insn, 5, 1);
2506 unsigned ext4 = extract32(insn, 6, 4);
2507 /* unsigned cc = extract32(insn, 10, 2); */
2508 unsigned i = extract32(insn, 12, 1);
2509 unsigned ua = extract32(insn, 13, 1);
2510 unsigned rx = extract32(insn, 16, 5);
2511 unsigned rb = extract32(insn, 21, 5);
2512 int modify = (m ? (ua ? -1 : 1) : 0);
2516 scale = (ua ? 3 : 0);
2520 disp = low_sextract(rx, 0, 5);
2523 modify = (m ? (ua ? -1 : 1) : 0);
2528 return do_floadd(ctx, rt, rb, rx, scale, disp, modify);
2530 return do_fstored(ctx, rt, rb, rx, scale, disp, modify);
2532 return gen_illegal(ctx);
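/* Compare and branch (CMPB/CMPIB families).  The comparison is
   computed as in1 - in2; do_sub_cond maps the condition field
   (cf = c * 2 + !is_true, the low bit carrying the negated sense)
   onto a DisasCond, and do_cbranch emits the conditional branch,
   honouring the nullify bit n.  */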
2536 static ExitStatus trans_cmpb(DisasContext *ctx, uint32_t insn,
2537 bool is_true, bool is_imm, bool is_dw)
2539 target_long disp = assemble_12(insn) * 4;
2540 unsigned n = extract32(insn, 1, 1);
2541 unsigned c = extract32(insn, 13, 3);
2542 unsigned r = extract32(insn, 21, 5);
2543 unsigned cf = c * 2 + !is_true;
2544 TCGv dest, in1, in2, sv;
2550 in1 = load_const(ctx, low_sextract(insn, 16, 5));
2552 in1 = load_gpr(ctx, extract32(insn, 16, 5));
2554 in2 = load_gpr(ctx, r);
2555 dest = get_temp(ctx);
2557 tcg_gen_sub_tl(dest, in1, in2);
2561 sv = do_sub_sv(ctx, dest, in1, in2);
2564 cond = do_sub_cond(cf, dest, in1, in2, sv);
2565 return do_cbranch(ctx, disp, n, &cond);
2568 static ExitStatus trans_addb(DisasContext *ctx, uint32_t insn,
2569 bool is_true, bool is_imm)
2571 target_long disp = assemble_12(insn) * 4;
2572 unsigned n = extract32(insn, 1, 1);
2573 unsigned c = extract32(insn, 13, 3);
2574 unsigned r = extract32(insn, 21, 5);
2575 unsigned cf = c * 2 + !is_true;
2576 TCGv dest, in1, in2, sv, cb_msb;
2582 in1 = load_const(ctx, low_sextract(insn, 16, 5));
2584 in1 = load_gpr(ctx, extract32(insn, 16, 5));
2586 in2 = load_gpr(ctx, r);
2587 dest = dest_gpr(ctx, r);
2589 TCGV_UNUSED(cb_msb);
2593 tcg_gen_add_tl(dest, in1, in2);
2596 cb_msb = get_temp(ctx);
2597 tcg_gen_movi_tl(cb_msb, 0);
2598 tcg_gen_add2_tl(dest, cb_msb, in1, cb_msb, in2, cb_msb);
2601 tcg_gen_add_tl(dest, in1, in2);
2602 sv = do_add_sv(ctx, dest, in1, in2);
2606 cond = do_cond(cf, dest, cb_msb, sv);
2607 return do_cbranch(ctx, disp, n, &cond);
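/* Branch on bit.  Shifting GR[r] left by the big-endian bit position
   (the fixed field p, or SAR for the variable form) moves the tested
   bit into the sign position, so a signed LT/GE comparison against
   zero implements branch-on-bit-clear vs branch-on-bit-set, with c
   selecting which sense branches.  */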
2610 static ExitStatus trans_bb(DisasContext *ctx, uint32_t insn)
2612 target_long disp = assemble_12(insn) * 4;
2613 unsigned n = extract32(insn, 1, 1);
2614 unsigned c = extract32(insn, 15, 1);
2615 unsigned r = extract32(insn, 16, 5);
2616 unsigned p = extract32(insn, 21, 5);
2617 unsigned i = extract32(insn, 26, 1);
2623 tmp = tcg_temp_new();
2624 tcg_r = load_gpr(ctx, r);
2626 tcg_gen_shli_tl(tmp, tcg_r, p);
2628 tcg_gen_shl_tl(tmp, tcg_r, cpu_sar);
2631 cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
2633 return do_cbranch(ctx, disp, n, &cond);
2636 static ExitStatus trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
2638 target_long disp = assemble_12(insn) * 4;
2639 unsigned n = extract32(insn, 1, 1);
2640 unsigned c = extract32(insn, 13, 3);
2641 unsigned t = extract32(insn, 16, 5);
2642 unsigned r = extract32(insn, 21, 5);
2648 dest = dest_gpr(ctx, r);
2650 tcg_gen_movi_tl(dest, low_sextract(t, 0, 5));
2651 } else if (t == 0) {
2652 tcg_gen_movi_tl(dest, 0);
2654 tcg_gen_mov_tl(dest, cpu_gr[t]);
2657 cond = do_sed_cond(c, dest);
2658 return do_cbranch(ctx, disp, n, &cond);
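/* SHRPW with a variable (SAR) shift amount.  The general case forms
   the 64-bit value with r1 in the upper half and r2 in the lower,
   shifts it right by SAR and keeps the low 32 bits; r1 == 0
   degenerates to a plain 32-bit shift and r1 == r2 to a 32-bit
   rotate, which the special-cased branches below generate directly.  */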
2661 static ExitStatus trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
2662 const DisasInsn *di)
2664 unsigned rt = extract32(insn, 0, 5);
2665 unsigned c = extract32(insn, 13, 3);
2666 unsigned r1 = extract32(insn, 16, 5);
2667 unsigned r2 = extract32(insn, 21, 5);
2674 dest = dest_gpr(ctx, rt);
2676 tcg_gen_ext32u_tl(dest, load_gpr(ctx, r2));
2677 tcg_gen_shr_tl(dest, dest, cpu_sar);
2678 } else if (r1 == r2) {
2679 TCGv_i32 t32 = tcg_temp_new_i32();
2680 tcg_gen_trunc_tl_i32(t32, load_gpr(ctx, r2));
2681 tcg_gen_rotr_i32(t32, t32, cpu_sar);
2682 tcg_gen_extu_i32_tl(dest, t32);
2683 tcg_temp_free_i32(t32);
2685 TCGv_i64 t = tcg_temp_new_i64();
2686 TCGv_i64 s = tcg_temp_new_i64();
2688 tcg_gen_concat_tl_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
2689 tcg_gen_extu_tl_i64(s, cpu_sar);
2690 tcg_gen_shr_i64(t, t, s);
2691 tcg_gen_trunc_i64_tl(dest, t);
2693 tcg_temp_free_i64(t);
2694 tcg_temp_free_i64(s);
2696 save_gpr(ctx, rt, dest);
2698 /* Install the new nullification. */
2699 cond_free(&ctx->null_cond);
2701 ctx->null_cond = do_sed_cond(c, dest);
2703 return nullify_end(ctx, NO_EXIT);
2706 static ExitStatus trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
2707 const DisasInsn *di)
2709 unsigned rt = extract32(insn, 0, 5);
2710 unsigned cpos = extract32(insn, 5, 5);
2711 unsigned c = extract32(insn, 13, 3);
2712 unsigned r1 = extract32(insn, 16, 5);
2713 unsigned r2 = extract32(insn, 21, 5);
2714 unsigned sa = 31 - cpos;
2721 dest = dest_gpr(ctx, rt);
2722 t2 = load_gpr(ctx, r2);
2724 TCGv_i32 t32 = tcg_temp_new_i32();
2725 tcg_gen_trunc_tl_i32(t32, t2);
2726 tcg_gen_rotri_i32(t32, t32, sa);
2727 tcg_gen_extu_i32_tl(dest, t32);
2728 tcg_temp_free_i32(t32);
2729 } else if (r1 == 0) {
2730 tcg_gen_extract_tl(dest, t2, sa, 32 - sa);
2732 TCGv t0 = tcg_temp_new();
2733 tcg_gen_extract_tl(t0, t2, sa, 32 - sa);
2734 tcg_gen_deposit_tl(dest, t0, cpu_gr[r1], 32 - sa, sa);
2737 save_gpr(ctx, rt, dest);
2739 /* Install the new nullification. */
2740 cond_free(&ctx->null_cond);
2742 ctx->null_cond = do_sed_cond(c, dest);
2744 return nullify_end(ctx, NO_EXIT);
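/* EXTRW with the extract position taken from SAR.  SAR holds a
   big-endian bit number, so XORing with TARGET_LONG_BITS - 1 turns it
   into the equivalent right-shift amount; the following
   sextract/extract of LEN bits produces the signed or unsigned
   result.  */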
2747 static ExitStatus trans_extrw_sar(DisasContext *ctx, uint32_t insn,
2748 const DisasInsn *di)
2750 unsigned clen = extract32(insn, 0, 5);
2751 unsigned is_se = extract32(insn, 10, 1);
2752 unsigned c = extract32(insn, 13, 3);
2753 unsigned rt = extract32(insn, 16, 5);
2754 unsigned rr = extract32(insn, 21, 5);
2755 unsigned len = 32 - clen;
2756 TCGv dest, src, tmp;
2762 dest = dest_gpr(ctx, rt);
2763 src = load_gpr(ctx, rr);
2764 tmp = tcg_temp_new();
2766 /* Recall that SAR uses big-endian bit numbering. */
2767 tcg_gen_xori_tl(tmp, cpu_sar, TARGET_LONG_BITS - 1);
2769 tcg_gen_sar_tl(dest, src, tmp);
2770 tcg_gen_sextract_tl(dest, dest, 0, len);
2772 tcg_gen_shr_tl(dest, src, tmp);
2773 tcg_gen_extract_tl(dest, dest, 0, len);
2776 save_gpr(ctx, rt, dest);
2778 /* Install the new nullification. */
2779 cond_free(&ctx->null_cond);
2781 ctx->null_cond = do_sed_cond(c, dest);
2783 return nullify_end(ctx, NO_EXIT);
2786 static ExitStatus trans_extrw_imm(DisasContext *ctx, uint32_t insn,
2787 const DisasInsn *di)
2789 unsigned clen = extract32(insn, 0, 5);
2790 unsigned pos = extract32(insn, 5, 5);
2791 unsigned is_se = extract32(insn, 10, 1);
2792 unsigned c = extract32(insn, 13, 3);
2793 unsigned rt = extract32(insn, 16, 5);
2794 unsigned rr = extract32(insn, 21, 5);
2795 unsigned len = 32 - clen;
2796 unsigned cpos = 31 - pos;
2803 dest = dest_gpr(ctx, rt);
2804 src = load_gpr(ctx, rr);
2806 tcg_gen_sextract_tl(dest, src, cpos, len);
2808 tcg_gen_extract_tl(dest, src, cpos, len);
2810 save_gpr(ctx, rt, dest);
2812 /* Install the new nullification. */
2813 cond_free(&ctx->null_cond);
2815 ctx->null_cond = do_sed_cond(c, dest);
2817 return nullify_end(ctx, NO_EXIT);
2820 static const DisasInsn table_sh_ex[] = {
2821 { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
2822 { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
2823 { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
2824 { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
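/* DEPWI with a constant source: the deposit is folded at translation
   time into two masks, mask0 (the field inserted into all zeros) and
   mask1 (the field inserted into all ones), so the generated code is
   at most an AND with mask1 plus an OR with mask0, or just a move of
   mask0 when the old register contents are discarded.  */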
2827 static ExitStatus trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
2828 const DisasInsn *di)
2830 unsigned clen = extract32(insn, 0, 5);
2831 unsigned cpos = extract32(insn, 5, 5);
2832 unsigned nz = extract32(insn, 10, 1);
2833 unsigned c = extract32(insn, 13, 3);
2834 target_long val = low_sextract(insn, 16, 5);
2835 unsigned rt = extract32(insn, 21, 5);
2836 unsigned len = 32 - clen;
2837 target_long mask0, mask1;
2843 if (cpos + len > 32) {
2847 dest = dest_gpr(ctx, rt);
2848 mask0 = deposit64(0, cpos, len, val);
2849 mask1 = deposit64(-1, cpos, len, val);
2852 TCGv src = load_gpr(ctx, rt);
2854 tcg_gen_andi_tl(dest, src, mask1);
2857 tcg_gen_ori_tl(dest, src, mask0);
2859 tcg_gen_movi_tl(dest, mask0);
2861 save_gpr(ctx, rt, dest);
2863 /* Install the new nullification. */
2864 cond_free(&ctx->null_cond);
2866 ctx->null_cond = do_sed_cond(c, dest);
2868 return nullify_end(ctx, NO_EXIT);
2871 static ExitStatus trans_depw_imm(DisasContext *ctx, uint32_t insn,
2872 const DisasInsn *di)
2874 unsigned clen = extract32(insn, 0, 5);
2875 unsigned cpos = extract32(insn, 5, 5);
2876 unsigned nz = extract32(insn, 10, 1);
2877 unsigned c = extract32(insn, 13, 3);
2878 unsigned rr = extract32(insn, 16, 5);
2879 unsigned rt = extract32(insn, 21, 5);
2880 unsigned rs = nz ? rt : 0;
2881 unsigned len = 32 - clen;
2887 if (cpos + len > 32) {
2891 dest = dest_gpr(ctx, rt);
2892 val = load_gpr(ctx, rr);
2894 tcg_gen_deposit_z_tl(dest, val, cpos, len);
2896 tcg_gen_deposit_tl(dest, cpu_gr[rs], val, cpos, len);
2898 save_gpr(ctx, rt, dest);
2900 /* Install the new nullification. */
2901 cond_free(&ctx->null_cond);
2903 ctx->null_cond = do_sed_cond(c, dest);
2905 return nullify_end(ctx, NO_EXIT);
2908 static ExitStatus trans_depw_sar(DisasContext *ctx, uint32_t insn,
2909 const DisasInsn *di)
2911 unsigned clen = extract32(insn, 0, 5);
2912 unsigned nz = extract32(insn, 10, 1);
2913 unsigned i = extract32(insn, 12, 1);
2914 unsigned c = extract32(insn, 13, 3);
2915 unsigned rt = extract32(insn, 21, 5);
2916 unsigned rs = nz ? rt : 0;
2917 unsigned len = 32 - clen;
2918 TCGv val, mask, tmp, shift, dest;
2919 unsigned msb = 1U << (len - 1);
2926 val = load_const(ctx, low_sextract(insn, 16, 5));
2928 val = load_gpr(ctx, extract32(insn, 16, 5));
2930 dest = dest_gpr(ctx, rt);
2931 shift = tcg_temp_new();
2932 tmp = tcg_temp_new();
2934 /* Convert big-endian bit numbering in SAR to left-shift. */
2935 tcg_gen_xori_tl(shift, cpu_sar, TARGET_LONG_BITS - 1);
2937 mask = tcg_const_tl(msb + (msb - 1));
2938 tcg_gen_and_tl(tmp, val, mask);
2940 tcg_gen_shl_tl(mask, mask, shift);
2941 tcg_gen_shl_tl(tmp, tmp, shift);
2942 tcg_gen_andc_tl(dest, cpu_gr[rs], mask);
2943 tcg_gen_or_tl(dest, dest, tmp);
2945 tcg_gen_shl_tl(dest, tmp, shift);
2947 tcg_temp_free(shift);
2948 tcg_temp_free(mask);
2950 save_gpr(ctx, rt, dest);
2952 /* Install the new nullification. */
2953 cond_free(&ctx->null_cond);
2955 ctx->null_cond = do_sed_cond(c, dest);
2957 return nullify_end(ctx, NO_EXIT);
2960 static const DisasInsn table_depw[] = {
2961 { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
2962 { 0xd4000800u, 0xfc001800u, trans_depw_imm },
2963 { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
2966 static ExitStatus trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
2968 unsigned n = extract32(insn, 1, 1);
2969 unsigned b = extract32(insn, 21, 5);
2970 target_long disp = assemble_17(insn);
2972 /* unsigned s = low_uextract(insn, 13, 3); */
2973 /* ??? It seems like there should be a good way of using
2974 "be disp(sr2, r0)", the canonical gateway entry mechanism
2975 to our advantage. But that appears to be inconvenient to
2976 manage alongside branch delay slots. Therefore we handle
2977 entry into the gateway page via absolute address. */
2979 /* Since we don't implement spaces, just branch. Do notice the special
2980 case of "be disp(*,r0)" using a direct branch to disp, so that we can
2981 goto_tb to the TB containing the syscall. */
2983 return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
2985 TCGv tmp = get_temp(ctx);
2986 tcg_gen_addi_tl(tmp, load_gpr(ctx, b), disp);
2987 return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
2991 static ExitStatus trans_bl(DisasContext *ctx, uint32_t insn,
2992 const DisasInsn *di)
2994 unsigned n = extract32(insn, 1, 1);
2995 unsigned link = extract32(insn, 21, 5);
2996 target_long disp = assemble_17(insn);
2998 return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
3001 static ExitStatus trans_bl_long(DisasContext *ctx, uint32_t insn,
3002 const DisasInsn *di)
3004 unsigned n = extract32(insn, 1, 1);
3005 target_long disp = assemble_22(insn);
3007 return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
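/* BLR: the target is GR[x] shifted left by 3 and added to the
   address of the instruction two slots past the branch (iaoq_f + 8);
   the optional return link in GR[link] is handled by do_ibranch.  */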
3010 static ExitStatus trans_blr(DisasContext *ctx, uint32_t insn,
3011 const DisasInsn *di)
3013 unsigned n = extract32(insn, 1, 1);
3014 unsigned rx = extract32(insn, 16, 5);
3015 unsigned link = extract32(insn, 21, 5);
3016 TCGv tmp = get_temp(ctx);
3018 tcg_gen_shli_tl(tmp, load_gpr(ctx, rx), 3);
3019 tcg_gen_addi_tl(tmp, tmp, ctx->iaoq_f + 8);
3020 return do_ibranch(ctx, tmp, link, n);
3023 static ExitStatus trans_bv(DisasContext *ctx, uint32_t insn,
3024 const DisasInsn *di)
3026 unsigned n = extract32(insn, 1, 1);
3027 unsigned rx = extract32(insn, 16, 5);
3028 unsigned rb = extract32(insn, 21, 5);
3032 dest = load_gpr(ctx, rb);
3034 dest = get_temp(ctx);
3035 tcg_gen_shli_tl(dest, load_gpr(ctx, rx), 3);
3036 tcg_gen_add_tl(dest, dest, load_gpr(ctx, rb));
3038 return do_ibranch(ctx, dest, 0, n);
3041 static ExitStatus trans_bve(DisasContext *ctx, uint32_t insn,
3042 const DisasInsn *di)
3044 unsigned n = extract32(insn, 1, 1);
3045 unsigned rb = extract32(insn, 21, 5);
3046 unsigned link = extract32(insn, 13, 1) ? 2 : 0;
3048 return do_ibranch(ctx, load_gpr(ctx, rb), link, n);
3051 static const DisasInsn table_branch[] = {
3052 { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
3053 { 0xe800a000u, 0xfc00e000u, trans_bl_long },
3054 { 0xe8004000u, 0xfc00fffdu, trans_blr },
3055 { 0xe800c000u, 0xfc00fffdu, trans_bv },
3056 { 0xe800d000u, 0xfc00dffcu, trans_bve },
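/* The trans_fop_* wrappers below differ only in how the FP register
   numbers are pulled out of the instruction word: the 0C-format forms
   use plain 5-bit fields, while the 0E-format forms go through the
   assemble_*64 helpers, which (like rt = t1 * 32 + t0 in trans_copr_w
   above) fold the extra format bits into the 0..63 single-precision
   numbering.  The operation itself is the callback stored in the
   DisasInsn entry (f_wew, f_ded, ...).  */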
3059 static ExitStatus trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
3060 const DisasInsn *di)
3062 unsigned rt = extract32(insn, 0, 5);
3063 unsigned ra = extract32(insn, 21, 5);
3064 return do_fop_wew(ctx, rt, ra, di->f_wew);
3067 static ExitStatus trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
3068 const DisasInsn *di)
3070 unsigned rt = assemble_rt64(insn);
3071 unsigned ra = assemble_ra64(insn);
3072 return do_fop_wew(ctx, rt, ra, di->f_wew);
3075 static ExitStatus trans_fop_ded(DisasContext *ctx, uint32_t insn,
3076 const DisasInsn *di)
3078 unsigned rt = extract32(insn, 0, 5);
3079 unsigned ra = extract32(insn, 21, 5);
3080 return do_fop_ded(ctx, rt, ra, di->f_ded);
3083 static ExitStatus trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
3084 const DisasInsn *di)
3086 unsigned rt = extract32(insn, 0, 5);
3087 unsigned ra = extract32(insn, 21, 5);
3088 return do_fop_wed(ctx, rt, ra, di->f_wed);
3091 static ExitStatus trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
3092 const DisasInsn *di)
3094 unsigned rt = assemble_rt64(insn);
3095 unsigned ra = extract32(insn, 21, 5);
3096 return do_fop_wed(ctx, rt, ra, di->f_wed);
3099 static ExitStatus trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
3100 const DisasInsn *di)
3102 unsigned rt = extract32(insn, 0, 5);
3103 unsigned ra = extract32(insn, 21, 5);
3104 return do_fop_dew(ctx, rt, ra, di->f_dew);
3107 static ExitStatus trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
3108 const DisasInsn *di)
3110 unsigned rt = extract32(insn, 0, 5);
3111 unsigned ra = assemble_ra64(insn);
3112 return do_fop_dew(ctx, rt, ra, di->f_dew);
3115 static ExitStatus trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
3116 const DisasInsn *di)
3118 unsigned rt = extract32(insn, 0, 5);
3119 unsigned rb = extract32(insn, 16, 5);
3120 unsigned ra = extract32(insn, 21, 5);
3121 return do_fop_weww(ctx, rt, ra, rb, di->f_weww);
3124 static ExitStatus trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
3125 const DisasInsn *di)
3127 unsigned rt = assemble_rt64(insn);
3128 unsigned rb = assemble_rb64(insn);
3129 unsigned ra = assemble_ra64(insn);
3130 return do_fop_weww(ctx, rt, ra, rb, di->f_weww);
3133 static ExitStatus trans_fop_dedd(DisasContext *ctx, uint32_t insn,
3134 const DisasInsn *di)
3136 unsigned rt = extract32(insn, 0, 5);
3137 unsigned rb = extract32(insn, 16, 5);
3138 unsigned ra = extract32(insn, 21, 5);
3139 return do_fop_dedd(ctx, rt, ra, rb, di->f_dedd);
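/* The FP copy/abs/neg/negabs "operations" need no helper call: they
   are pure bit manipulations of the IEEE sign bit (clear with
   AND INT*_MAX, flip with XOR INT*_MIN, set with OR INT*_MIN).  The
   unused env argument only exists so these match the f_wew/f_ded
   callback signatures.  */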
3142 static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3144 tcg_gen_mov_i32(dst, src);
3147 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3149 tcg_gen_mov_i64(dst, src);
3152 static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3154 tcg_gen_andi_i32(dst, src, INT32_MAX);
3157 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3159 tcg_gen_andi_i64(dst, src, INT64_MAX);
3162 static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3164 tcg_gen_xori_i32(dst, src, INT32_MIN);
3167 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3169 tcg_gen_xori_i64(dst, src, INT64_MIN);
3172 static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3174 tcg_gen_ori_i32(dst, src, INT32_MIN);
3177 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3179 tcg_gen_ori_i64(dst, src, INT64_MIN);
3182 static ExitStatus do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
3183 unsigned y, unsigned c)
3185 TCGv_i32 ta, tb, tc, ty;
3189 ta = load_frw0_i32(ra);
3190 tb = load_frw0_i32(rb);
3191 ty = tcg_const_i32(y);
3192 tc = tcg_const_i32(c);
3194 gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3196 tcg_temp_free_i32(ta);
3197 tcg_temp_free_i32(tb);
3198 tcg_temp_free_i32(ty);
3199 tcg_temp_free_i32(tc);
3201 return nullify_end(ctx, NO_EXIT);
3204 static ExitStatus trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
3205 const DisasInsn *di)
3207 unsigned c = extract32(insn, 0, 5);
3208 unsigned y = extract32(insn, 13, 3);
3209 unsigned rb = extract32(insn, 16, 5);
3210 unsigned ra = extract32(insn, 21, 5);
3211 return do_fcmp_s(ctx, ra, rb, y, c);
3214 static ExitStatus trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
3215 const DisasInsn *di)
3217 unsigned c = extract32(insn, 0, 5);
3218 unsigned y = extract32(insn, 13, 3);
3219 unsigned rb = assemble_rb64(insn);
3220 unsigned ra = assemble_ra64(insn);
3221 return do_fcmp_s(ctx, ra, rb, y, c);
3224 static ExitStatus trans_fcmp_d(DisasContext *ctx, uint32_t insn,
3225 const DisasInsn *di)
3227 unsigned c = extract32(insn, 0, 5);
3228 unsigned y = extract32(insn, 13, 3);
3229 unsigned rb = extract32(insn, 16, 5);
3230 unsigned ra = extract32(insn, 21, 5);
3238 ty = tcg_const_i32(y);
3239 tc = tcg_const_i32(c);
3241 gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3243 tcg_temp_free_i64(ta);
3244 tcg_temp_free_i64(tb);
3245 tcg_temp_free_i32(ty);
3246 tcg_temp_free_i32(tc);
3248 return nullify_end(ctx, NO_EXIT);
3251 static ExitStatus trans_ftest_t(DisasContext *ctx, uint32_t insn,
3252 const DisasInsn *di)
3254 unsigned y = extract32(insn, 13, 3);
3255 unsigned cbit = (y ^ 1) - 1;
3261 tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3262 tcg_gen_extract_tl(t, t, 21 - cbit, 1);
3263 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3266 return nullify_end(ctx, NO_EXIT);
3269 static ExitStatus trans_ftest_q(DisasContext *ctx, uint32_t insn,
3270 const DisasInsn *di)
3272 unsigned c = extract32(insn, 0, 5);
3280 tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3283 case 0: /* simple */
3284 tcg_gen_andi_tl(t, t, 0x4000000);
3285 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3309 return gen_illegal(ctx);
3312 TCGv c = load_const(ctx, mask);
3313 tcg_gen_or_tl(t, t, c);
3314 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3316 tcg_gen_andi_tl(t, t, mask);
3317 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3320 return nullify_end(ctx, NO_EXIT);
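/* XMPYU: unsigned 32x32->64 multiply within the FP register file.
   The two single-word operands are widened to 64 bits (load_frw0_i64
   presumably zero-extends, and treats register 0 as the constant 0),
   multiplied, and the full 64-bit product becomes the double-word
   destination rt.  */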
3323 static ExitStatus trans_xmpyu(DisasContext *ctx, uint32_t insn,
3324 const DisasInsn *di)
3326 unsigned rt = extract32(insn, 0, 5);
3327 unsigned rb = assemble_rb64(insn);
3328 unsigned ra = assemble_ra64(insn);
3333 a = load_frw0_i64(ra);
3334 b = load_frw0_i64(rb);
3335 tcg_gen_mul_i64(a, a, b);
3337 tcg_temp_free_i64(a);
3338 tcg_temp_free_i64(b);
3340 return nullify_end(ctx, NO_EXIT);
3343 #define FOP_DED trans_fop_ded, .f_ded
3344 #define FOP_DEDD trans_fop_dedd, .f_dedd
3346 #define FOP_WEW trans_fop_wew_0c, .f_wew
3347 #define FOP_DEW trans_fop_dew_0c, .f_dew
3348 #define FOP_WED trans_fop_wed_0c, .f_wed
3349 #define FOP_WEWW trans_fop_weww_0c, .f_weww
3351 static const DisasInsn table_float_0c[] = {
3352 /* floating point class zero */
3353 { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
3354 { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
3355 { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
3356 { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
3357 { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
3358 { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },
3360 { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
3361 { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
3362 { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
3363 { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
3364 { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
3365 { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
3367 /* floating point class three */
3368 { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
3369 { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
3370 { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
3371 { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },
3373 { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
3374 { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
3375 { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
3376 { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
3378 /* floating point class one */
3380 { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
3381 { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
3383 { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
3384 { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
3385 { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
3386 { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
3388 { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
3389 { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
3390 { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
3391 { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
3392 /* float/int truncate */
3393 { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
3394 { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
3395 { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
3396 { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
3398 { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
3399 { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
3400 { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
3401 { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
3403 { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
3404 { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
3405 { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
3406 { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
3407 /* float/uint truncate */
3408 { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
3409 { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
3410 { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
3411 { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
3413 /* floating point class two */
3414 { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
3415 { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
3416 { 0x30002420, 0xffffffe0, trans_ftest_q },
3417 { 0x30000420, 0xffff1fff, trans_ftest_t },
3419 /* FID. Note that ra == rt == 0, which via fcpy puts 0 into fr0.
3420 This is machine/revision == 0, which is reserved for the simulator. */
3421 { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
3428 #define FOP_WEW trans_fop_wew_0e, .f_wew
3429 #define FOP_DEW trans_fop_dew_0e, .f_dew
3430 #define FOP_WED trans_fop_wed_0e, .f_wed
3431 #define FOP_WEWW trans_fop_weww_0e, .f_weww
3433 static const DisasInsn table_float_0e[] = {
3434 /* floating point class zero */
3435 { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
3436 { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
3437 { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
3438 { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
3439 { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
3440 { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },
3442 { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
3443 { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
3444 { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
3445 { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
3446 { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
3447 { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
3449 /* floating point class three */
3450 { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
3451 { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
3452 { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
3453 { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },
3455 { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
3456 { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
3457 { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
3458 { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
3460 { 0x38004700, 0xfc00ef60, trans_xmpyu },
3462 /* floating point class one */
3464 { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
3465 { 0x38002200, 0xfc1fffc0, FOP_DEW = gen_helper_fcnv_s_d },
3467 { 0x38008200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_w_s },
3468 { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
3469 { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
3470 { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
3472 { 0x38010200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_w },
3473 { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
3474 { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
3475 { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
3476 /* float/int truncate */
3477 { 0x38018200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_w },
3478 { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
3479 { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
3480 { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
3482 { 0x38028200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_uw_s },
3483 { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
3484 { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
3485 { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
3487 { 0x38030200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_uw },
3488 { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
3489 { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
3490 { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
3491 /* float/uint truncate */
3492 { 0x38038200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_uw },
3493 { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
3494 { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
3495 { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
3497 /* floating point class two */
3498 { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
3499 { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
3509 /* Convert the fmpyadd single-precision register encodings to standard. */
3510 static inline int fmpyadd_s_reg(unsigned r)
3512 return (r & 16) * 2 + 16 + (r & 15);
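/* With the formula above, encodings 0..15 map to 16..31 and
   encodings 16..31 map to 48..63, i.e. the two single-precision
   halves of fr16..fr31 in the 0..63 numbering used by the 0E-format
   helpers.  Which range denotes the left half and which the right
   follows the load_frw/save_frw convention defined earlier.  */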
3515 static ExitStatus trans_fmpyadd(DisasContext *ctx, uint32_t insn, bool is_sub)
3517 unsigned tm = extract32(insn, 0, 5);
3518 unsigned f = extract32(insn, 5, 1);
3519 unsigned ra = extract32(insn, 6, 5);
3520 unsigned ta = extract32(insn, 11, 5);
3521 unsigned rm2 = extract32(insn, 16, 5);
3522 unsigned rm1 = extract32(insn, 21, 5);
3526 /* Independent multiply & add/sub, with undefined behaviour
3527 if outputs overlap inputs. */
3529 tm = fmpyadd_s_reg(tm);
3530 ra = fmpyadd_s_reg(ra);
3531 ta = fmpyadd_s_reg(ta);
3532 rm2 = fmpyadd_s_reg(rm2);
3533 rm1 = fmpyadd_s_reg(rm1);
3534 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3535 do_fop_weww(ctx, ta, ta, ra,
3536 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
3538 do_fop_dedd(ctx, tm, rm1, rm2, gen_helper_fmpy_d);
3539 do_fop_dedd(ctx, ta, ta, ra,
3540 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
3543 return nullify_end(ctx, NO_EXIT);
3546 static ExitStatus trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
3547 const DisasInsn *di)
3549 unsigned rt = assemble_rt64(insn);
3550 unsigned neg = extract32(insn, 5, 1);
3551 unsigned rm1 = assemble_ra64(insn);
3552 unsigned rm2 = assemble_rb64(insn);
3553 unsigned ra3 = assemble_rc64(insn);
3557 a = load_frw0_i32(rm1);
3558 b = load_frw0_i32(rm2);
3559 c = load_frw0_i32(ra3);
3562 gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
3564 gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
3567 tcg_temp_free_i32(b);
3568 tcg_temp_free_i32(c);
3569 save_frw_i32(rt, a);
3570 tcg_temp_free_i32(a);
3571 return nullify_end(ctx, NO_EXIT);
3574 static ExitStatus trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
3575 const DisasInsn *di)
3577 unsigned rt = extract32(insn, 0, 5);
3578 unsigned neg = extract32(insn, 5, 1);
3579 unsigned rm1 = extract32(insn, 21, 5);
3580 unsigned rm2 = extract32(insn, 16, 5);
3581 unsigned ra3 = assemble_rc64(insn);
3590 gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
3592 gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
3595 tcg_temp_free_i64(b);
3596 tcg_temp_free_i64(c);
3598 tcg_temp_free_i64(a);
3599 return nullify_end(ctx, NO_EXIT);
3602 static const DisasInsn table_fp_fused[] = {
3603 { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
3604 { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }
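/* Generic decoder used by all of the DisasInsn tables above: scan in
   order and dispatch to the first entry whose mask/match pair fits
   the instruction word, so overlapping patterns must be listed most
   specific first; anything unmatched falls through to gen_illegal.  */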
3607 static ExitStatus translate_table_int(DisasContext *ctx, uint32_t insn,
3608 const DisasInsn table[], size_t n)
3611 for (i = 0; i < n; ++i) {
3612 if ((insn & table[i].mask) == table[i].insn) {
3613 return table[i].trans(ctx, insn, &table[i]);
3616 return gen_illegal(ctx);
3619 #define translate_table(ctx, insn, table) \
3620 translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
3622 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
3624 uint32_t opc = extract32(insn, 26, 6);
3627 case 0x00: /* system op */
3628 return translate_table(ctx, insn, table_system);
3630 return translate_table(ctx, insn, table_mem_mgmt);
3632 return translate_table(ctx, insn, table_arith_log);
3634 return translate_table(ctx, insn, table_index_mem);
3636 return trans_fmpyadd(ctx, insn, false);
3638 return trans_ldil(ctx, insn);
3640 return trans_copr_w(ctx, insn);
3642 return trans_addil(ctx, insn);
3644 return trans_copr_dw(ctx, insn);
3646 return translate_table(ctx, insn, table_float_0c);
3648 return trans_ldo(ctx, insn);
3650 return translate_table(ctx, insn, table_float_0e);
3653 return trans_load(ctx, insn, false, MO_UB);
3655 return trans_load(ctx, insn, false, MO_TEUW);
3657 return trans_load(ctx, insn, false, MO_TEUL);
3659 return trans_load(ctx, insn, true, MO_TEUL);
3661 return trans_fload_mod(ctx, insn);
3663 return trans_load_w(ctx, insn);
3665 return trans_store(ctx, insn, false, MO_UB);
3667 return trans_store(ctx, insn, false, MO_TEUW);
3669 return trans_store(ctx, insn, false, MO_TEUL);
3671 return trans_store(ctx, insn, true, MO_TEUL);
3673 return trans_fstore_mod(ctx, insn);
3675 return trans_store_w(ctx, insn);
3678 return trans_cmpb(ctx, insn, true, false, false);
3680 return trans_cmpb(ctx, insn, true, true, false);
3682 return trans_cmpb(ctx, insn, false, false, false);
3684 return trans_cmpb(ctx, insn, false, true, false);
3686 return trans_cmpiclr(ctx, insn);
3688 return trans_subi(ctx, insn);
3690 return trans_fmpyadd(ctx, insn, true);
3692 return trans_cmpb(ctx, insn, true, false, true);
3694 return trans_addb(ctx, insn, true, false);
3696 return trans_addb(ctx, insn, true, true);
3698 return trans_addb(ctx, insn, false, false);
3700 return trans_addb(ctx, insn, false, true);
3703 return trans_addi(ctx, insn);
3705 return translate_table(ctx, insn, table_fp_fused);
3707 return trans_cmpb(ctx, insn, false, false, true);
3711 return trans_bb(ctx, insn);
3713 return trans_movb(ctx, insn, false);
3715 return trans_movb(ctx, insn, true);
3717 return translate_table(ctx, insn, table_sh_ex);
3719 return translate_table(ctx, insn, table_depw);
3721 return trans_be(ctx, insn, false);
3723 return trans_be(ctx, insn, true);
3725 return translate_table(ctx, insn, table_branch);
3727 case 0x04: /* spopn */
3728 case 0x05: /* diag */
3729 case 0x0F: /* product specific */
3732 case 0x07: /* unassigned */
3733 case 0x15: /* unassigned */
3734 case 0x1D: /* unassigned */
3735 case 0x37: /* unassigned */
3736 case 0x3F: /* unassigned */
3740 return gen_illegal(ctx);
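/* Main translation loop.  The nullification state is seeded from
   PSW[N] as carried in tb->flags, the two-entry instruction address
   queue (iaoq_f/iaoq_b) is advanced after every insn, and generation
   stops once control flow becomes non-sequential, the insn or TCG
   buffer budget is exhausted, or a page-zero entry point is taken.  */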
3743 void gen_intermediate_code(CPUHPPAState *env, struct TranslationBlock *tb)
3745 HPPACPU *cpu = hppa_env_get_cpu(env);
3746 CPUState *cs = CPU(cpu);
3749 int num_insns, max_insns, i;
3753 ctx.iaoq_f = tb->pc;
3754 ctx.iaoq_b = tb->cs_base;
3755 ctx.singlestep_enabled = cs->singlestep_enabled;
3758 for (i = 0; i < ARRAY_SIZE(ctx.temps); ++i) {
3759 TCGV_UNUSED(ctx.temps[i]);
3762 /* Compute the maximum number of insns to execute, as bounded by
3763 (1) icount, (2) single-stepping, (3) branch delay slots, or
3764 (4) the number of insns remaining on the current page. */
3765 max_insns = tb->cflags & CF_COUNT_MASK;
3766 if (max_insns == 0) {
3767 max_insns = CF_COUNT_MASK;
3769 if (ctx.singlestep_enabled || singlestep) {
3771 } else if (max_insns > TCG_MAX_INSNS) {
3772 max_insns = TCG_MAX_INSNS;
3778 /* Seed the nullification status from PSW[N], as stored in TB->FLAGS. */
3779 ctx.null_cond = cond_make_f();
3780 ctx.psw_n_nonzero = false;
3781 if (tb->flags & 1) {
3782 ctx.null_cond.c = TCG_COND_ALWAYS;
3783 ctx.psw_n_nonzero = true;
3785 ctx.null_lab = NULL;
3788 tcg_gen_insn_start(ctx.iaoq_f, ctx.iaoq_b);
3791 if (unlikely(cpu_breakpoint_test(cs, ctx.iaoq_f, BP_ANY))) {
3792 ret = gen_excp(&ctx, EXCP_DEBUG);
3795 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
3799 if (ctx.iaoq_f < TARGET_PAGE_SIZE) {
3800 ret = do_page_zero(&ctx);
3801 assert(ret != NO_EXIT);
3803 /* Always fetch the insn, even if nullified, so that we check
3804 the page permissions for execute. */
3805 uint32_t insn = cpu_ldl_code(env, ctx.iaoq_f);
3807 /* Set up the IA queue for the next insn.
3808 This will be overwritten by a branch. */
3809 if (ctx.iaoq_b == -1) {
3811 ctx.iaoq_n_var = get_temp(&ctx);
3812 tcg_gen_addi_tl(ctx.iaoq_n_var, cpu_iaoq_b, 4);
3814 ctx.iaoq_n = ctx.iaoq_b + 4;
3815 TCGV_UNUSED(ctx.iaoq_n_var);
3818 if (unlikely(ctx.null_cond.c == TCG_COND_ALWAYS)) {
3819 ctx.null_cond.c = TCG_COND_NEVER;
3822 ret = translate_one(&ctx, insn);
3823 assert(ctx.null_lab == NULL);
3827 for (i = 0; i < ctx.ntemps; ++i) {
3828 tcg_temp_free(ctx.temps[i]);
3829 TCGV_UNUSED(ctx.temps[i]);
3833 /* If we see non-linear instructions, exhaust instruction count,
3834 or run out of buffer space, stop generation. */
3835 /* ??? The non-linear instruction restriction is purely due to
3836 the debugging dump. Otherwise we *could* follow unconditional
3837 branches within the same page. */
3839 && (ctx.iaoq_b != ctx.iaoq_f + 4
3840 || num_insns >= max_insns
3841 || tcg_op_buf_full())) {
3842 if (ctx.null_cond.c == TCG_COND_NEVER
3843 || ctx.null_cond.c == TCG_COND_ALWAYS) {
3844 nullify_set(&ctx, ctx.null_cond.c == TCG_COND_ALWAYS);
3845 gen_goto_tb(&ctx, 0, ctx.iaoq_b, ctx.iaoq_n);
3848 ret = EXIT_IAQ_N_STALE;
3852 ctx.iaoq_f = ctx.iaoq_b;
3853 ctx.iaoq_b = ctx.iaoq_n;
3854 if (ret == EXIT_NORETURN
3855 || ret == EXIT_GOTO_TB
3856 || ret == EXIT_IAQ_N_UPDATED) {
3859 if (ctx.iaoq_f == -1) {
3860 tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
3861 copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_n, ctx.iaoq_n_var);
3863 ret = EXIT_IAQ_N_UPDATED;
3866 if (ctx.iaoq_b == -1) {
3867 tcg_gen_mov_tl(cpu_iaoq_b, ctx.iaoq_n_var);
3869 } while (ret == NO_EXIT);
3871 if (tb->cflags & CF_LAST_IO) {
3879 case EXIT_IAQ_N_STALE:
3880 copy_iaoq_entry(cpu_iaoq_f, ctx.iaoq_f, cpu_iaoq_f);
3881 copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_b, cpu_iaoq_b);
3884 case EXIT_IAQ_N_UPDATED:
3885 if (ctx.singlestep_enabled) {
3886 gen_excp_1(EXCP_DEBUG);
3895 gen_tb_end(tb, num_insns);
3897 tb->size = num_insns * 4;
3898 tb->icount = num_insns;
3901 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
3902 && qemu_log_in_addr_range(tb->pc)) {
3906 qemu_log("IN:\n0x00000000: (null)\n\n");
3909 qemu_log("IN:\n0x000000b0: light-weight-syscall\n\n");
3912 qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n\n");
3915 qemu_log("IN:\n0x00000100: syscall\n\n");
3918 qemu_log("IN: %s\n", lookup_symbol(tb->pc));
3919 log_target_disas(cs, tb->pc, tb->size, 1);
3928 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
3931 env->iaoq_f = data[0];
3932 if (data[1] != -1) {
3933 env->iaoq_b = data[1];
3935 /* Since we were executing the instruction at IAOQ_F, and took some
3936 sort of action that provoked the cpu_restore_state, we can infer
3937 that the instruction was not nullified. */