2 * HPPA emulation cpu translation for qemu.
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "trace-tcg.h"
33 typedef struct DisasCond {
40 typedef struct DisasContext {
41 DisasContextBase base;
58 /* Target-specific return values from translate_one, indicating the
59 state of the TB. Note that DISAS_NEXT indicates that we are not
62 /* We are not using a goto_tb (for whatever reason), but have updated
63 the iaq (for whatever reason), so don't do it again on exit. */
64 #define DISAS_IAQ_N_UPDATED DISAS_TARGET_0
66 /* We are exiting the TB, but have neither emitted a goto_tb, nor
67 updated the iaq for the next instruction to be executed. */
68 #define DISAS_IAQ_N_STALE DISAS_TARGET_1
70 typedef struct DisasInsn {
72 DisasJumpType (*trans)(DisasContext *ctx, uint32_t insn,
73 const struct DisasInsn *f);
75 void (*ttt)(TCGv, TCGv, TCGv);
76 void (*weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
77 void (*dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
78 void (*wew)(TCGv_i32, TCGv_env, TCGv_i32);
79 void (*ded)(TCGv_i64, TCGv_env, TCGv_i64);
80 void (*wed)(TCGv_i32, TCGv_env, TCGv_i64);
81 void (*dew)(TCGv_i64, TCGv_env, TCGv_i32);
85 /* global register indexes */
86 static TCGv_env cpu_env;
87 static TCGv cpu_gr[32];
88 static TCGv cpu_iaoq_f;
89 static TCGv cpu_iaoq_b;
91 static TCGv cpu_psw_n;
92 static TCGv cpu_psw_v;
93 static TCGv cpu_psw_cb;
94 static TCGv cpu_psw_cb_msb;
98 #include "exec/gen-icount.h"
/* NOTE(review): this is a sampled extract of target/hppa/translate.c;
   original line numbers are embedded at the start of each line and many
   intermediate lines are missing.  Only comments have been added.  */
/* Create the TCG globals used by the translator: the env pointer, the
   writable general registers gr[1..31], and the PSW/IAOQ variables
   listed in the vars[] table (built with the DEF_VAR offset macro).  */
100 void hppa_translate_init(void)
102 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
104 typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
105 static const GlobalVar vars[] = {
119 /* Use the symbolic register names that match the disassembler. */
120 static const char gr_names[32][4] = {
121 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
122 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
123 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
124 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
129 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
130 tcg_ctx.tcg_env = cpu_env;
/* gr0 is the architectural zero register; it gets no backing global
   (reads are synthesized as zero in load_gpr).  */
132 TCGV_UNUSED(cpu_gr[0]);
133 for (i = 1; i < 32; i++) {
134 cpu_gr[i] = tcg_global_mem_new(cpu_env,
135 offsetof(CPUHPPAState, gr[i]),
139 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
140 const GlobalVar *v = &vars[i];
141 *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
/* Constructors and lifecycle helpers for DisasCond, the translator's
   symbolic condition: C is the TCGCond, A0/A1 its operands; A0_IS_N
   marks A0 as aliasing the PSW[N] global, A1_IS_0 marks A1 as an
   implicit constant zero (see cond_prep/cond_free below).  */
/* A condition that is never true.  */
145 static DisasCond cond_make_f(void)
147 DisasCond r = { .c = TCG_COND_NEVER };
/* The condition "PSW[N] != 0", referencing the psw_n global.  */
153 static DisasCond cond_make_n(void)
155 DisasCond r = { .c = TCG_COND_NE, .a0_is_n = true, .a1_is_0 = true };
/* Compare a snapshot of A0 against zero.  A0 is copied into a fresh
   temp so later writes to the caller's value don't alter the cond.  */
161 static DisasCond cond_make_0(TCGCond c, TCGv a0)
163 DisasCond r = { .c = c, .a1_is_0 = true };
165 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
166 r.a0 = tcg_temp_new();
167 tcg_gen_mov_tl(r.a0, a0);
/* Compare snapshots of A0 and A1; both are copied, as above.  */
173 static DisasCond cond_make(TCGCond c, TCGv a0, TCGv a1)
175 DisasCond r = { .c = c };
177 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
178 r.a0 = tcg_temp_new();
179 tcg_gen_mov_tl(r.a0, a0);
180 r.a1 = tcg_temp_new();
181 tcg_gen_mov_tl(r.a1, a1);
/* Materialize the implicit zero A1 as a real constant temp so both
   operands can be handed to a two-operand TCG op.  */
186 static void cond_prep(DisasCond *cond)
189 cond->a1_is_0 = false;
190 cond->a1 = tcg_const_tl(0);
/* Free any temps owned by COND and reset it (visible tail of a switch
   resets ALWAYS to NEVER; other cases are outside this extract).  */
194 static void cond_free(DisasCond *cond)
/* a0 aliasing psw_n is a global and must not be freed.  */
198 if (!cond->a0_is_n) {
199 tcg_temp_free(cond->a0);
201 if (!cond->a1_is_0) {
202 tcg_temp_free(cond->a1);
204 cond->a0_is_n = false;
205 cond->a1_is_0 = false;
206 TCGV_UNUSED(cond->a0);
207 TCGV_UNUSED(cond->a1);
209 case TCG_COND_ALWAYS:
210 cond->c = TCG_COND_NEVER;
/* Allocate a temporary tracked in ctx->temps; the assert bounds the
   per-instruction temp count to the fixed array size.  */
217 static TCGv get_temp(DisasContext *ctx)
219 unsigned i = ctx->ntemps++;
220 g_assert(i < ARRAY_SIZE(ctx->temps));
221 return ctx->temps[i] = tcg_temp_new();
/* Tracked temporary holding the constant V.  */
224 static TCGv load_const(DisasContext *ctx, target_long v)
226 TCGv t = get_temp(ctx);
227 tcg_gen_movi_tl(t, v);
/* Read general register REG; the r0 path returns a fresh zero temp
   (the non-zero path is outside this extract).  */
231 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
234 TCGv t = get_temp(ctx);
235 tcg_gen_movi_tl(t, 0);
/* Pick a destination for writing REG: use a scratch temp when the
   target is r0 or the insn may still be nullified, so the real
   register global is not clobbered prematurely.  */
242 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
244 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
245 return get_temp(ctx);
/* Commit T into DEST unless the pending nullification condition is
   true, in which case DEST keeps its previous value (movcond picks
   the old DEST when null_cond holds).  */
251 static void save_or_nullify(DisasContext *ctx, TCGv dest, TCGv t)
253 if (ctx->null_cond.c != TCG_COND_NEVER) {
254 cond_prep(&ctx->null_cond);
255 tcg_gen_movcond_tl(ctx->null_cond.c, dest, ctx->null_cond.a0,
256 ctx->null_cond.a1, dest, t);
258 tcg_gen_mov_tl(dest, t);
/* Store T to general register REG, honoring nullification.  The
   reg==0 discard path is outside this extract -- TODO confirm.  */
262 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv t)
265 save_or_nullify(ctx, cpu_gr[reg], t);
/* HI_OFS/LO_OFS (defined just past this #ifdef, outside the extract)
   select which 32-bit half of a 64-bit fr[] element holds a
   single-precision register, depending on host endianness.  */
269 #ifdef HOST_WORDS_BIGENDIAN
/* Load single-precision FP reg RT into a fresh i32: bit 5 of RT
   selects the half of fr[rt & 31].  */
277 static TCGv_i32 load_frw_i32(unsigned rt)
279 TCGv_i32 ret = tcg_temp_new_i32();
280 tcg_gen_ld_i32(ret, cpu_env,
281 offsetof(CPUHPPAState, fr[rt & 31])
282 + (rt & 32 ? LO_OFS : HI_OFS));
/* As load_frw_i32, but register 0 reads as the constant zero.  */
286 static TCGv_i32 load_frw0_i32(unsigned rt)
289 return tcg_const_i32(0);
291 return load_frw_i32(rt);
/* Single-precision register zero-extended into an i64 temp.  */
295 static TCGv_i64 load_frw0_i64(unsigned rt)
298 return tcg_const_i64(0);
300 TCGv_i64 ret = tcg_temp_new_i64();
301 tcg_gen_ld32u_i64(ret, cpu_env,
302 offsetof(CPUHPPAState, fr[rt & 31])
303 + (rt & 32 ? LO_OFS : HI_OFS));
/* Store VAL into the selected half of fr[rt & 31].  */
308 static void save_frw_i32(unsigned rt, TCGv_i32 val)
310 tcg_gen_st_i32(val, cpu_env,
311 offsetof(CPUHPPAState, fr[rt & 31])
312 + (rt & 32 ? LO_OFS : HI_OFS));
/* Load double-precision FP register RT into a fresh i64.  */
318 static TCGv_i64 load_frd(unsigned rt)
320 TCGv_i64 ret = tcg_temp_new_i64();
321 tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
/* As load_frd, but register 0 reads as zero.  */
325 static TCGv_i64 load_frd0(unsigned rt)
328 return tcg_const_i64(0);
/* Store VAL into double-precision FP register RT.  */
334 static void save_frd(unsigned rt, TCGv_i64 val)
336 tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]))
339 /* Skip over the implementation of an insn that has been nullified.
340 Use this when the insn is too complex for a conditional move. */
341 static void nullify_over(DisasContext *ctx)
343 if (ctx->null_cond.c != TCG_COND_NEVER) {
344 /* The always condition should have been handled in the main loop. */
345 assert(ctx->null_cond.c != TCG_COND_ALWAYS);
347 ctx->null_lab = gen_new_label();
348 cond_prep(&ctx->null_cond);
350 /* If we're using PSW[N], copy it to a temp because... */
351 if (ctx->null_cond.a0_is_n) {
352 ctx->null_cond.a0_is_n = false;
353 ctx->null_cond.a0 = tcg_temp_new();
354 tcg_gen_mov_tl(ctx->null_cond.a0, cpu_psw_n);
356 /* ... we clear it before branching over the implementation,
357 so that (1) it's clear after nullifying this insn and
358 (2) if this insn nullifies the next, PSW[N] is valid. */
359 if (ctx->psw_n_nonzero) {
360 ctx->psw_n_nonzero = false;
361 tcg_gen_movi_tl(cpu_psw_n, 0);
/* Branch to null_lab (past the insn body) when the condition holds. */
364 tcg_gen_brcond_tl(ctx->null_cond.c, ctx->null_cond.a0,
365 ctx->null_cond.a1, ctx->null_lab);
366 cond_free(&ctx->null_cond);
370 /* Save the current nullification state to PSW[N]. */
371 static void nullify_save(DisasContext *ctx)
/* Unconditional path: PSW[N] just needs to be (re)zeroed.  */
373 if (ctx->null_cond.c == TCG_COND_NEVER) {
374 if (ctx->psw_n_nonzero) {
375 tcg_gen_movi_tl(cpu_psw_n, 0);
/* If a0 already aliases psw_n there is nothing to compute.  */
379 if (!ctx->null_cond.a0_is_n) {
380 cond_prep(&ctx->null_cond);
381 tcg_gen_setcond_tl(ctx->null_cond.c, cpu_psw_n,
382 ctx->null_cond.a0, ctx->null_cond.a1);
383 ctx->psw_n_nonzero = true;
385 cond_free(&ctx->null_cond);
388 /* Set a PSW[N] to X. The intention is that this is used immediately
389 before a goto_tb/exit_tb, so that there is no fallthru path to other
390 code within the TB. Therefore we do not update psw_n_nonzero. */
391 static void nullify_set(DisasContext *ctx, bool x)
393 if (ctx->psw_n_nonzero || x) {
394 tcg_gen_movi_tl(cpu_psw_n, x);
398 /* Mark the end of an instruction that may have been nullified.
399 This is the pair to nullify_over. */
400 static DisasJumpType nullify_end(DisasContext *ctx, DisasJumpType status)
402 TCGLabel *null_lab = ctx->null_lab;
404 if (likely(null_lab == NULL)) {
405 /* The current insn wasn't conditional or handled the condition
406 applied to it without a branch, so the (new) setting of
407 NULL_COND can be applied directly to the next insn. */
410 ctx->null_lab = NULL;
412 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
413 /* The next instruction will be unconditional,
414 and NULL_COND already reflects that. */
415 gen_set_label(null_lab);
417 /* The insn that we just executed is itself nullifying the next
418 instruction. Store the condition in the PSW[N] global.
419 We asserted PSW[N] = 0 in nullify_over, so that after the
420 label we have the proper value in place. */
422 gen_set_label(null_lab);
423 ctx->null_cond = cond_make_n();
/* NOTE(review): these two consecutive checks of STATUS look
   contradictory in isolation; intermediate lines are missing from
   this extract -- consult the full file before concluding a bug.  */
426 assert(status != DISAS_NORETURN && status != DISAS_IAQ_N_UPDATED);
427 if (status == DISAS_NORETURN) {
/* Copy an IAOQ value to DEST: use the immediate IVAL when known at
   translation time, else the runtime variable VVAL (ival == -1 is the
   "unknown" marker used throughout this translator).  */
433 static void copy_iaoq_entry(TCGv dest, target_ulong ival, TCGv vval)
435 if (unlikely(ival == -1)) {
436 tcg_gen_mov_tl(dest, vval);
438 tcg_gen_movi_tl(dest, ival);
/* Target address of a PC-relative branch: displacement is applied to
   the insn address +8 (one slot past the delay slot).  */
442 static inline target_ulong iaoq_dest(DisasContext *ctx, target_long disp)
444 return ctx->iaoq_f + disp + 8;
/* Raise EXCEPTION via the excp helper (does not return to the TB).  */
447 static void gen_excp_1(int exception)
449 TCGv_i32 t = tcg_const_i32(exception);
450 gen_helper_excp(cpu_env, t);
451 tcg_temp_free_i32(t);
/* Flush the current front/back IAOQ values to the globals, then raise
   EXCEPTION; the TB ends here.  */
454 static DisasJumpType gen_excp(DisasContext *ctx, int exception)
456 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
457 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
459 gen_excp_1(exception);
460 return DISAS_NORETURN;
/* Illegal instruction: SIGILL wrapped in nullify handling, so a
   nullified illegal insn does not trap.  */
463 static DisasJumpType gen_illegal(DisasContext *ctx)
466 return nullify_end(ctx, gen_excp(ctx, EXCP_SIGILL));
469 static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
471 /* Suppress goto_tb in the case of single-steping and IO. */
472 if ((tb_cflags(ctx->base.tb) & CF_LAST_IO) || ctx->base.singlestep_enabled) {
478 /* If the next insn is to be nullified, and it's on the same page,
479 and we're not attempting to set a breakpoint on it, then we can
480 totally skip the nullified insn. This avoids creating and
481 executing a TB that merely branches to the next TB. */
482 static bool use_nullify_skip(DisasContext *ctx)
484 return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
485 && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
/* Emit a TB exit to front/back queue addresses F/B.  Direct-linkable
   case uses goto_tb slot WHICH; otherwise fall back to storing the
   addresses and a lookup_and_goto_ptr (or EXCP_DEBUG under
   single-step).  */
488 static void gen_goto_tb(DisasContext *ctx, int which,
489 target_ulong f, target_ulong b)
490 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
492 tcg_gen_goto_tb(which);
493 tcg_gen_movi_tl(cpu_iaoq_f, f);
494 tcg_gen_movi_tl(cpu_iaoq_b, b);
495 tcg_gen_exit_tb((uintptr_t)ctx->base.tb + which);
497 copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
498 copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
499 if (ctx->base.singlestep_enabled) {
500 gen_excp_1(EXCP_DEBUG);
502 tcg_gen_lookup_and_goto_ptr();
507 /* PA has a habit of taking the LSB of a field and using that as the sign,
508 with the rest of the field becoming the least significant bits. */
/* Sign-extract LEN bits at POS, with the bit at POS as the sign.  */
509 static target_long low_sextract(uint32_t val, int pos, int len)
511 target_ulong x = -(target_ulong)extract32(val, pos, 1);
512 x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
/* The assemble_* helpers below reassemble register numbers and
   immediates whose bits are scattered across the instruction word,
   following the PA-RISC field encodings.  */
516 static unsigned assemble_rt64(uint32_t insn)
518 unsigned r1 = extract32(insn, 6, 1);
519 unsigned r0 = extract32(insn, 0, 5);
523 static unsigned assemble_ra64(uint32_t insn)
525 unsigned r1 = extract32(insn, 7, 1);
526 unsigned r0 = extract32(insn, 21, 5);
530 static unsigned assemble_rb64(uint32_t insn)
532 unsigned r1 = extract32(insn, 12, 1);
533 unsigned r0 = extract32(insn, 16, 5);
537 static unsigned assemble_rc64(uint32_t insn)
539 unsigned r2 = extract32(insn, 8, 1);
540 unsigned r1 = extract32(insn, 13, 3);
541 unsigned r0 = extract32(insn, 9, 2);
542 return r2 * 32 + r1 * 4 + r0;
/* 12-bit signed branch displacement: sign from bit 0, then bits 2 and
   3..12 of the insn.  */
545 static target_long assemble_12(uint32_t insn)
547 target_ulong x = -(target_ulong)(insn & 1);
548 x = (x << 1) | extract32(insn, 2, 1);
549 x = (x << 10) | extract32(insn, 3, 10);
553 static target_long assemble_16(uint32_t insn)
555 /* Take the name from PA2.0, which produces a 16-bit number
556 only with wide mode; otherwise a 14-bit number. Since we don't
557 implement wide mode, this is always the 14-bit number. */
558 return low_sextract(insn, 0, 14);
561 static target_long assemble_16a(uint32_t insn)
563 /* Take the name from PA2.0, which produces a 14-bit shifted number
564 only with wide mode; otherwise a 12-bit shifted number. Since we
565 don't implement wide mode, this is always the 12-bit number. */
566 target_ulong x = -(target_ulong)(insn & 1);
567 x = (x << 11) | extract32(insn, 2, 11);
/* 17-bit signed branch displacement.  */
571 static target_long assemble_17(uint32_t insn)
573 target_ulong x = -(target_ulong)(insn & 1);
574 x = (x << 5) | extract32(insn, 16, 5);
575 x = (x << 1) | extract32(insn, 2, 1);
576 x = (x << 10) | extract32(insn, 3, 10);
/* 21-bit immediate (LDIL/ADDIL style field scramble).  */
580 static target_long assemble_21(uint32_t insn)
582 target_ulong x = -(target_ulong)(insn & 1);
583 x = (x << 11) | extract32(insn, 1, 11);
584 x = (x << 2) | extract32(insn, 14, 2);
585 x = (x << 5) | extract32(insn, 16, 5);
586 x = (x << 2) | extract32(insn, 12, 2);
/* 22-bit signed branch displacement (PA2.0 B,L long form).  */
590 static target_long assemble_22(uint32_t insn)
592 target_ulong x = -(target_ulong)(insn & 1);
593 x = (x << 10) | extract32(insn, 16, 10);
594 x = (x << 1) | extract32(insn, 2, 1);
595 x = (x << 10) | extract32(insn, 3, 10);
599 /* The parisc documentation describes only the general interpretation of
600 the conditions, without describing their exact implementation. The
601 interpretations do not stand up well when considering ADD,C and SUB,B.
602 However, considering the Addition, Subtraction and Logical conditions
603 as a whole it would appear that these relations are similar to what
604 a traditional NZCV set of flags would produce. */
/* Build the DisasCond for condition field CF (bit 0 = negate), given
   the result RES, the carry-out msb CB_MSB and signed overflow SV.  */
606 static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
612 case 0: /* Never / TR */
613 cond = cond_make_f();
615 case 1: /* = / <> (Z / !Z) */
616 cond = cond_make_0(TCG_COND_EQ, res);
618 case 2: /* < / >= (N / !N) */
619 cond = cond_make_0(TCG_COND_LT, res);
621 case 3: /* <= / > (N | Z / !N & !Z) */
622 cond = cond_make_0(TCG_COND_LE, res);
624 case 4: /* NUV / UV (!C / C) */
625 cond = cond_make_0(TCG_COND_EQ, cb_msb);
627 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
628 tmp = tcg_temp_new();
629 tcg_gen_neg_tl(tmp, cb_msb);
630 tcg_gen_and_tl(tmp, tmp, res);
631 cond = cond_make_0(TCG_COND_EQ, tmp);
634 case 6: /* SV / NSV (V / !V) */
635 cond = cond_make_0(TCG_COND_LT, sv);
637 case 7: /* OD / EV */
638 tmp = tcg_temp_new();
639 tcg_gen_andi_tl(tmp, res, 1);
640 cond = cond_make_0(TCG_COND_NE, tmp);
644 g_assert_not_reached();
/* Low bit of CF selects the negated sense of the condition.  */
647 cond.c = tcg_invert_cond(cond.c);
653 /* Similar, but for the special case of subtraction without borrow, we
654 can use the inputs directly. This can allow other computation to be
655 deleted as unused. */
657 static DisasCond do_sub_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2, TCGv sv)
663 cond = cond_make(TCG_COND_EQ, in1, in2);
666 cond = cond_make(TCG_COND_LT, in1, in2);
669 cond = cond_make(TCG_COND_LE, in1, in2);
671 case 4: /* << / >>= */
672 cond = cond_make(TCG_COND_LTU, in1, in2);
674 case 5: /* <<= / >> */
675 cond = cond_make(TCG_COND_LEU, in1, in2);
/* Remaining cases fall back to the generic NZCV-style test.  */
678 return do_cond(cf, res, sv, sv);
681 cond.c = tcg_invert_cond(cond.c);
687 /* Similar, but for logicals, where the carry and overflow bits are not
688 computed, and use of them is undefined. */
690 static DisasCond do_log_cond(unsigned cf, TCGv res)
/* Conditions 4..6 depend on C/V, which logicals leave undefined;
   the visible fallthrough passes RES for all three operands.  */
693 case 4: case 5: case 6:
697 return do_cond(cf, res, res, res);
700 /* Similar, but for shift/extract/deposit conditions. */
702 static DisasCond do_sed_cond(unsigned orig, TCGv res)
706 /* Convert the compressed condition codes to standard.
707 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
708 4-7 are the reverse of 0-3. */
715 return do_log_cond(c * 2 + f, res);
718 /* Similar, but for unit conditions. */
720 static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
727 /* Since we want to test lots of carry-out bits all at once, do not
728 * do our normal thing and compute carry-in of bit B+1 since that
729 * leaves us with carry bits spread across two words.
/* cb = per-bit carry-out of in1 + in2: (in1|in2) & ~res | (in1&in2). */
732 tmp = tcg_temp_new();
733 tcg_gen_or_tl(cb, in1, in2);
734 tcg_gen_and_tl(tmp, in1, in2);
735 tcg_gen_andc_tl(cb, cb, res);
736 tcg_gen_or_tl(cb, cb, tmp);
741 case 0: /* never / TR */
742 case 1: /* undefined */
743 case 5: /* undefined */
744 cond = cond_make_f();
747 case 2: /* SBZ / NBZ */
748 /* See hasless(v,1) from
749 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
751 tmp = tcg_temp_new();
752 tcg_gen_subi_tl(tmp, res, 0x01010101u);
753 tcg_gen_andc_tl(tmp, tmp, res);
754 tcg_gen_andi_tl(tmp, tmp, 0x80808080u);
755 cond = cond_make_0(TCG_COND_NE, tmp);
759 case 3: /* SHZ / NHZ */
760 tmp = tcg_temp_new();
761 tcg_gen_subi_tl(tmp, res, 0x00010001u);
762 tcg_gen_andc_tl(tmp, tmp, res);
763 tcg_gen_andi_tl(tmp, tmp, 0x80008000u);
764 cond = cond_make_0(TCG_COND_NE, tmp);
768 case 4: /* SDC / NDC */
769 tcg_gen_andi_tl(cb, cb, 0x88888888u);
770 cond = cond_make_0(TCG_COND_NE, cb);
773 case 6: /* SBC / NBC */
774 tcg_gen_andi_tl(cb, cb, 0x80808080u);
775 cond = cond_make_0(TCG_COND_NE, cb);
778 case 7: /* SHC / NHC */
779 tcg_gen_andi_tl(cb, cb, 0x80008000u);
780 cond = cond_make_0(TCG_COND_NE, cb);
784 g_assert_not_reached();
790 cond.c = tcg_invert_cond(cond.c);
796 /* Compute signed overflow for addition. */
/* sv = (res ^ in1) & ~(in1 ^ in2): overflow iff the operands agree
   in sign but the result differs.  SV is a tracked ctx temp so it
   survives until end of insn; TMP is local scratch.  */
797 static TCGv do_add_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
799 TCGv sv = get_temp(ctx);
800 TCGv tmp = tcg_temp_new();
802 tcg_gen_xor_tl(sv, res, in1);
803 tcg_gen_xor_tl(tmp, in1, in2);
804 tcg_gen_andc_tl(sv, sv, tmp);
810 /* Compute signed overflow for subtraction. */
/* sv = (res ^ in1) & (in1 ^ in2): overflow iff the operands differ
   in sign and the result's sign differs from in1.  */
811 static TCGv do_sub_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
813 TCGv sv = get_temp(ctx);
814 TCGv tmp = tcg_temp_new();
816 tcg_gen_xor_tl(sv, res, in1);
817 tcg_gen_xor_tl(tmp, in1, in2);
818 tcg_gen_and_tl(sv, sv, tmp);
/* Emit an ADD-family operation: rt = (in1 << shift) + in2 [+ carry].
   IS_L = logical (no carry computed unless the condition needs it),
   IS_TSV/IS_TC = trap on signed overflow / on condition,
   IS_C = add with carry-in from PSW[CB].  CF is the condition field.  */
824 static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
825 unsigned shift, bool is_l, bool is_tsv, bool is_tc,
826 bool is_c, unsigned cf)
828 TCGv dest, cb, cb_msb, sv, tmp;
829 unsigned c = cf >> 1;
832 dest = tcg_temp_new();
/* Optional pre-shift of in1 (SH1ADD etc.).  */
838 tcg_gen_shli_tl(tmp, in1, shift);
/* Conditions 4/5 need the carry even for the logical form.  */
842 if (!is_l || c == 4 || c == 5) {
843 TCGv zero = tcg_const_tl(0);
844 cb_msb = get_temp(ctx);
845 tcg_gen_add2_tl(dest, cb_msb, in1, zero, in2, zero);
/* Fold in the carry-in for ADD,C.  */
847 tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
/* Per-bit carry vector: in1 ^ in2 ^ dest.  */
852 tcg_gen_xor_tl(cb, in1, in2);
853 tcg_gen_xor_tl(cb, cb, dest);
856 tcg_gen_add_tl(dest, in1, in2);
858 tcg_gen_add_tl(dest, dest, cpu_psw_cb_msb);
862 /* Compute signed overflow if required. */
864 if (is_tsv || c == 6) {
865 sv = do_add_sv(ctx, dest, in1, in2);
867 /* ??? Need to include overflow from shift. */
868 gen_helper_tsv(cpu_env, sv);
872 /* Emit any conditional trap before any writeback. */
873 cond = do_cond(cf, dest, cb_msb, sv);
876 tmp = tcg_temp_new();
877 tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
878 gen_helper_tcond(cpu_env, tmp);
882 /* Write back the result. */
884 save_or_nullify(ctx, cpu_psw_cb, cb);
885 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
887 save_gpr(ctx, rt, dest);
890 /* Install the new nullification. */
891 cond_free(&ctx->null_cond);
892 ctx->null_cond = cond;
/* Emit a SUB-family operation: rt = in1 - in2, optionally with
   borrow-in (IS_B), trap on overflow (IS_TSV) or on condition
   (IS_TC).  Implemented as in1 + ~in2 + carry via add2/sub2.  */
896 static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
897 bool is_tsv, bool is_b, bool is_tc, unsigned cf)
899 TCGv dest, sv, cb, cb_msb, zero, tmp;
900 unsigned c = cf >> 1;
903 dest = tcg_temp_new();
905 cb_msb = tcg_temp_new();
907 zero = tcg_const_tl(0);
909 /* DEST,C = IN1 + ~IN2 + C. */
910 tcg_gen_not_tl(cb, in2);
911 tcg_gen_add2_tl(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
912 tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cb, zero);
913 tcg_gen_xor_tl(cb, cb, in1);
914 tcg_gen_xor_tl(cb, cb, dest);
916 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
917 operations by seeding the high word with 1 and subtracting. */
918 tcg_gen_movi_tl(cb_msb, 1);
919 tcg_gen_sub2_tl(dest, cb_msb, in1, cb_msb, in2, zero)
920 tcg_gen_eqv_tl(cb, in1, in2);
921 tcg_gen_xor_tl(cb, cb, dest);
925 /* Compute signed overflow if required. */
927 if (is_tsv || c == 6) {
928 sv = do_sub_sv(ctx, dest, in1, in2);
930 gen_helper_tsv(cpu_env, sv);
934 /* Compute the condition. We cannot use the special case for borrow. */
936 cond = do_sub_cond(cf, dest, in1, in2, sv);
938 cond = do_cond(cf, dest, cb_msb, sv);
941 /* Emit any conditional trap before any writeback. */
944 tmp = tcg_temp_new();
945 tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
946 gen_helper_tcond(cpu_env, tmp);
950 /* Write back the result. */
951 save_or_nullify(ctx, cpu_psw_cb, cb);
952 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
953 save_gpr(ctx, rt, dest);
956 /* Install the new nullification. */
957 cond_free(&ctx->null_cond);
958 ctx->null_cond = cond;
/* COMCLR: compare in1 - in2 for the condition, then clear rt.  */
962 static DisasJumpType do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
963 TCGv in2, unsigned cf)
968 dest = tcg_temp_new();
969 tcg_gen_sub_tl(dest, in1, in2);
971 /* Compute signed overflow if required. */
973 if ((cf >> 1) == 6) {
974 sv = do_sub_sv(ctx, dest, in1, in2);
977 /* Form the condition for the compare. */
978 cond = do_sub_cond(cf, dest, in1, in2, sv);
/* Destination register is cleared regardless of the condition.  */
981 tcg_gen_movi_tl(dest, 0);
982 save_gpr(ctx, rt, dest);
985 /* Install the new nullification. */
986 cond_free(&ctx->null_cond);
987 ctx->null_cond = cond;
/* Logical op: rt = fn(in1, in2); only the logical condition set
   applies (carry/overflow are undefined for logicals).  */
991 static DisasJumpType do_log(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
992 unsigned cf, void (*fn)(TCGv, TCGv, TCGv))
994 TCGv dest = dest_gpr(ctx, rt);
996 /* Perform the operation, and writeback. */
998 save_gpr(ctx, rt, dest);
1000 /* Install the new nullification. */
1001 cond_free(&ctx->null_cond);
1003 ctx->null_cond = do_log_cond(cf, dest);
/* Unit op (UADDCM etc.): rt = fn(in1, in2), with the per-sub-unit
   condition set; IS_TC additionally traps on the condition.  */
1008 static DisasJumpType do_unit(DisasContext *ctx, unsigned rt, TCGv in1,
1009 TCGv in2, unsigned cf, bool is_tc,
1010 void (*fn)(TCGv, TCGv, TCGv))
/* Fast path (visible fragment): no condition to evaluate.  */
1016 dest = dest_gpr(ctx, rt);
1018 save_gpr(ctx, rt, dest);
1019 cond_free(&ctx->null_cond);
1021 dest = tcg_temp_new();
1024 cond = do_unit_cond(cf, dest, in1, in2);
1027 TCGv tmp = tcg_temp_new();
1029 tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
1030 gen_helper_tcond(cpu_env, tmp);
1033 save_gpr(ctx, rt, dest);
1035 cond_free(&ctx->null_cond);
1036 ctx->null_cond = cond;
1041 /* Emit a memory load. The modify parameter should be
1042 * < 0 for pre-modify,
1043 * > 0 for post-modify,
1044 * = 0 for no base register update.
/* 32-bit load: address = gpr[rb] + (gpr[rx] << scale | disp); with
   modify != 0 the updated address is written back to RB.  RX and DISP
   are mutually exclusive.  */
1046 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1047 unsigned rx, int scale, target_long disp,
1048 int modify, TCGMemOp mop)
1052 /* Caller uses nullify_over/nullify_end. */
1053 assert(ctx->null_cond.c == TCG_COND_NEVER);
1055 addr = tcg_temp_new();
1056 base = load_gpr(ctx, rb);
1058 /* Note that RX is mutually exclusive with DISP. */
1060 tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1061 tcg_gen_add_tl(addr, addr, base);
1063 tcg_gen_addi_tl(addr, base, disp);
1067 tcg_gen_qemu_ld_i32(dest, addr, MMU_USER_IDX, mop);
/* Pre-modify uses the new address; post-modify uses the old base.  */
1069 tcg_gen_qemu_ld_i32(dest, (modify < 0 ? addr : base),
1071 save_gpr(ctx, rb, addr);
1073 tcg_temp_free(addr);
/* 64-bit variant of the above; identical addressing logic.  */
1076 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1077 unsigned rx, int scale, target_long disp,
1078 int modify, TCGMemOp mop)
1082 /* Caller uses nullify_over/nullify_end. */
1083 assert(ctx->null_cond.c == TCG_COND_NEVER);
1085 addr = tcg_temp_new();
1086 base = load_gpr(ctx, rb);
1088 /* Note that RX is mutually exclusive with DISP. */
1090 tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1091 tcg_gen_add_tl(addr, addr, base);
1093 tcg_gen_addi_tl(addr, base, disp);
1097 tcg_gen_qemu_ld_i64(dest, addr, MMU_USER_IDX, mop);
1099 tcg_gen_qemu_ld_i64(dest, (modify < 0 ? addr : base),
1101 save_gpr(ctx, rb, addr);
1103 tcg_temp_free(addr);
/* 32-bit store; same addressing and modify semantics as the loads.  */
1106 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1107 unsigned rx, int scale, target_long disp,
1108 int modify, TCGMemOp mop)
1112 /* Caller uses nullify_over/nullify_end. */
1113 assert(ctx->null_cond.c == TCG_COND_NEVER);
1115 addr = tcg_temp_new();
1116 base = load_gpr(ctx, rb);
1118 /* Note that RX is mutually exclusive with DISP. */
1120 tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1121 tcg_gen_add_tl(addr, addr, base);
1123 tcg_gen_addi_tl(addr, base, disp);
1126 tcg_gen_qemu_st_i32(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
1129 save_gpr(ctx, rb, addr);
1131 tcg_temp_free(addr);
/* 64-bit store variant.  */
1134 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1135 unsigned rx, int scale, target_long disp,
1136 int modify, TCGMemOp mop)
1140 /* Caller uses nullify_over/nullify_end. */
1141 assert(ctx->null_cond.c == TCG_COND_NEVER);
1143 addr = tcg_temp_new();
1144 base = load_gpr(ctx, rb);
1146 /* Note that RX is mutually exclusive with DISP. */
1148 tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
1149 tcg_gen_add_tl(addr, addr, base);
1151 tcg_gen_addi_tl(addr, base, disp);
1154 tcg_gen_qemu_st_i64(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
1157 save_gpr(ctx, rb, addr);
1159 tcg_temp_free(addr);
/* Select the target_ulong-width accessors.  */
1162 #if TARGET_LONG_BITS == 64
1163 #define do_load_tl do_load_64
1164 #define do_store_tl do_store_64
1166 #define do_load_tl do_load_32
1167 #define do_store_tl do_store_32
/* General-register load wrapper around do_load_tl; when modifying the
   base, load into a temp so RT == RB still observes the loaded value.
   All of these wrappers assume the caller bracketed the insn with
   nullify_over, and finish via nullify_end.  */
1170 static DisasJumpType do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1171 unsigned rx, int scale, target_long disp,
1172 int modify, TCGMemOp mop)
1179 /* No base register update. */
1180 dest = dest_gpr(ctx, rt);
1182 /* Make sure if RT == RB, we see the result of the load. */
1183 dest = get_temp(ctx);
1185 do_load_tl(ctx, dest, rb, rx, scale, disp, modify, mop);
1186 save_gpr(ctx, rt, dest);
1188 return nullify_end(ctx, DISAS_NEXT);
/* Single-precision FP load; loading fr0 additionally runs the
   loaded_fr0 helper (fr0 holds FP status).  */
1191 static DisasJumpType do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1192 unsigned rx, int scale, target_long disp,
1199 tmp = tcg_temp_new_i32();
1200 do_load_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1201 save_frw_i32(rt, tmp);
1202 tcg_temp_free_i32(tmp);
1205 gen_helper_loaded_fr0(cpu_env);
1208 return nullify_end(ctx, DISAS_NEXT);
/* Double-precision FP load; same fr0 special case.  */
1211 static DisasJumpType do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1212 unsigned rx, int scale, target_long disp,
1219 tmp = tcg_temp_new_i64();
1220 do_load_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1222 tcg_temp_free_i64(tmp);
1225 gen_helper_loaded_fr0(cpu_env);
1228 return nullify_end(ctx, DISAS_NEXT);
/* General-register store wrapper (no index register).  */
1231 static DisasJumpType do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1232 target_long disp, int modify, TCGMemOp mop)
1235 do_store_tl(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, modify, mop);
1236 return nullify_end(ctx, DISAS_NEXT);
/* Single-precision FP store.  */
1239 static DisasJumpType do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1240 unsigned rx, int scale, target_long disp,
1247 tmp = load_frw_i32(rt);
1248 do_store_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1249 tcg_temp_free_i32(tmp);
1251 return nullify_end(ctx, DISAS_NEXT);
/* Double-precision FP store.  */
1254 static DisasJumpType do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1255 unsigned rx, int scale, target_long disp,
1263 do_store_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1264 tcg_temp_free_i64(tmp);
1266 return nullify_end(ctx, DISAS_NEXT);
/* The do_fop_* family wraps a one- or two-operand FP helper FUNC
   (naming: w = 32-bit, d = 64-bit, e = env) in load / call / save /
   nullify_end boilerplate.  */
1269 static DisasJumpType do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1270 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1275 tmp = load_frw0_i32(ra);
1277 func(tmp, cpu_env, tmp);
1279 save_frw_i32(rt, tmp);
1280 tcg_temp_free_i32(tmp);
1281 return nullify_end(ctx, DISAS_NEXT);
1284 static DisasJumpType do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1285 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1292 dst = tcg_temp_new_i32();
1294 func(dst, cpu_env, src);
1296 tcg_temp_free_i64(src);
1297 save_frw_i32(rt, dst);
1298 tcg_temp_free_i32(dst);
1299 return nullify_end(ctx, DISAS_NEXT);
1302 static DisasJumpType do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1303 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1308 tmp = load_frd0(ra);
1310 func(tmp, cpu_env, tmp);
1313 tcg_temp_free_i64(tmp);
1314 return nullify_end(ctx, DISAS_NEXT);
1317 static DisasJumpType do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1318 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1324 src = load_frw0_i32(ra);
1325 dst = tcg_temp_new_i64();
1327 func(dst, cpu_env, src);
1329 tcg_temp_free_i32(src);
1331 tcg_temp_free_i64(dst);
1332 return nullify_end(ctx, DISAS_NEXT);
1335 static DisasJumpType do_fop_weww(DisasContext *ctx, unsigned rt,
1336 unsigned ra, unsigned rb,
1337 void (*func)(TCGv_i32, TCGv_env,
1338 TCGv_i32, TCGv_i32))
1343 a = load_frw0_i32(ra);
1344 b = load_frw0_i32(rb);
1346 func(a, cpu_env, a, b);
1348 tcg_temp_free_i32(b);
1349 save_frw_i32(rt, a);
1350 tcg_temp_free_i32(a);
1351 return nullify_end(ctx, DISAS_NEXT);
1354 static DisasJumpType do_fop_dedd(DisasContext *ctx, unsigned rt,
1355 unsigned ra, unsigned rb,
1356 void (*func)(TCGv_i64, TCGv_env,
1357 TCGv_i64, TCGv_i64))
1365 func(a, cpu_env, a, b);
1367 tcg_temp_free_i64(b);
1369 tcg_temp_free_i64(a);
1370 return nullify_end(ctx, DISAS_NEXT);
1373 /* Emit an unconditional branch to a direct target, which may or may not
1374 have already had nullification handled. */
/* LINK != 0 writes the return address (iaoq_n) into gr[link].
   IS_N requests nullification of the delay-slot insn.  */
1375 static DisasJumpType do_dbranch(DisasContext *ctx, target_ulong dest,
1376 unsigned link, bool is_n)
/* Simple case: no pending nullification -- fall through the delay
   slot and let the main loop use goto_tb via ALWAYS null_cond.  */
1378 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1380 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1384 ctx->null_cond.c = TCG_COND_ALWAYS;
1391 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
/* Nullified delay slot on the same page: skip it entirely.  */
1394 if (is_n && use_nullify_skip(ctx)) {
1395 nullify_set(ctx, 0);
1396 gen_goto_tb(ctx, 0, dest, dest + 4);
1398 nullify_set(ctx, is_n);
1399 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1402 nullify_end(ctx, DISAS_NEXT);
/* Not-taken path (branch itself nullified): continue sequentially. */
1404 nullify_set(ctx, 0);
1405 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1406 return DISAS_NORETURN;
1410 /* Emit a conditional branch to a direct target. If the branch itself
1411 is nullified, we should have already used nullify_over. */
1412 static DisasJumpType do_cbranch(DisasContext *ctx, target_long disp, bool is_n,
1415 target_ulong dest = iaoq_dest(ctx, disp);
1416 TCGLabel *taken = NULL;
1417 TCGCond c = cond->c;
1420 assert(ctx->null_cond.c == TCG_COND_NEVER);
1422 /* Handle TRUE and NEVER as direct branches. */
1423 if (c == TCG_COND_ALWAYS) {
1424 return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1426 if (c == TCG_COND_NEVER) {
1427 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1430 taken = gen_new_label();
1432 tcg_gen_brcond_tl(c, cond->a0, cond->a1, taken);
1435 /* Not taken: Condition not satisfied; nullify on backward branches. */
1436 n = is_n && disp < 0;
1437 if (n && use_nullify_skip(ctx)) {
1438 nullify_set(ctx, 0);
1439 gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1441 if (!n && ctx->null_lab) {
1442 gen_set_label(ctx->null_lab);
1443 ctx->null_lab = NULL;
1445 nullify_set(ctx, n);
1446 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1449 gen_set_label(taken);
1451 /* Taken: Condition satisfied; nullify on forward branches. */
1452 n = is_n && disp >= 0;
1453 if (n && use_nullify_skip(ctx)) {
1454 nullify_set(ctx, 0);
1455 gen_goto_tb(ctx, 1, dest, dest + 4);
1457 nullify_set(ctx, n);
1458 gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1461 /* Not taken: the branch itself was nullified. */
1462 if (ctx->null_lab) {
1463 gen_set_label(ctx->null_lab);
1464 ctx->null_lab = NULL;
1465 return DISAS_IAQ_N_STALE;
1467 return DISAS_NORETURN;
1471 /* Emit an unconditional branch to an indirect target. This handles
1472 nullification of the branch itself. */
1473 static DisasJumpType do_ibranch(DisasContext *ctx, TCGv dest,
1474 unsigned link, bool is_n)
1476 TCGv a0, a1, next, tmp;
1479 assert(ctx->null_lab == NULL);
/* Unconditional: route iaoq_n through a temp copy of DEST.  */
1481 if (ctx->null_cond.c == TCG_COND_NEVER) {
1483 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1485 next = get_temp(ctx);
1486 tcg_gen_mov_tl(next, dest);
1488 ctx->iaoq_n_var = next;
1490 ctx->null_cond.c = TCG_COND_ALWAYS;
1492 } else if (is_n && use_nullify_skip(ctx)) {
1493 /* The (conditional) branch, B, nullifies the next insn, N,
1494 and we're allowed to skip execution N (no single-step or
1495 tracepoint in effect). Since the goto_ptr that we must use
1496 for the indirect branch consumes no special resources, we
1497 can (conditionally) skip B and continue execution. */
1498 /* The use_nullify_skip test implies we have a known control path. */
1499 tcg_debug_assert(ctx->iaoq_b != -1);
1500 tcg_debug_assert(ctx->iaoq_n != -1);
1502 /* We do have to handle the non-local temporary, DEST, before
1503 branching. Since IOAQ_F is not really live at this point, we
1504 can simply store DEST optimistically. Similarly with IAOQ_B. */
1505 tcg_gen_mov_tl(cpu_iaoq_f, dest);
1506 tcg_gen_addi_tl(cpu_iaoq_b, dest, 4);
1510 tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
1512 tcg_gen_lookup_and_goto_ptr();
1513 return nullify_end(ctx, DISAS_NEXT);
/* Conditional: select between fall-through and DEST with movcond.  */
1515 cond_prep(&ctx->null_cond);
1516 c = ctx->null_cond.c;
1517 a0 = ctx->null_cond.a0;
1518 a1 = ctx->null_cond.a1;
1520 tmp = tcg_temp_new();
1521 next = get_temp(ctx);
1523 copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1524 tcg_gen_movcond_tl(c, next, a0, a1, tmp, dest);
1526 ctx->iaoq_n_var = next;
/* The link register is only written on the taken (not-nullified)
   path, again via movcond.  */
1529 tcg_gen_movcond_tl(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1533 /* The branch nullifies the next insn, which means the state of N
1534 after the branch is the inverse of the state of N that applied
1536 tcg_gen_setcond_tl(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1537 cond_free(&ctx->null_cond);
1538 ctx->null_cond = cond_make_n();
1539 ctx->psw_n_nonzero = true;
1541 cond_free(&ctx->null_cond);
1548 /* On Linux, page zero is normally marked execute only + gateway.
1549 Therefore normal read or write is supposed to fail, but specific
1550 offsets have kernel code mapped to raise permissions to implement
1551 system calls. Handling this via an explicit check here, rather
1552 in than the "be disp(sr2,r0)" instruction that probably sent us
1553 here, is the easiest way to handle the branch delay slot on the
1554 aforementioned BE. */
1555 static DisasJumpType do_page_zero(DisasContext *ctx)
1557 /* If by some means we get here with PSW[N]=1, that implies that
1558 the B,GATE instruction would be skipped, and we'd fault on the
1559 next insn within the privileged page. */
1560 switch (ctx->null_cond.c) {
1561 case TCG_COND_NEVER:
1563 case TCG_COND_ALWAYS:
1564 tcg_gen_movi_tl(cpu_psw_n, 0);
1567 /* Since this is always the first (and only) insn within the
1568 TB, we should know the state of PSW[N] from TB->FLAGS. */
1569 g_assert_not_reached();
1572 /* Check that we didn't arrive here via some means that allowed
1573 non-sequential instruction execution. Normally the PSW[B] bit
1574 detects this by disallowing the B,GATE instruction to execute
1575 under such conditions. */
1576 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
/* Dispatch on the gateway-page offset being executed. */
1580 switch (ctx->iaoq_f) {
1581 case 0x00: /* Null pointer call */
1582 gen_excp_1(EXCP_SIGSEGV);
1583 return DISAS_NORETURN;
1585 case 0xb0: /* LWS */
1586 gen_excp_1(EXCP_SYSCALL_LWS);
1587 return DISAS_NORETURN;
1589 case 0xe0: /* SET_THREAD_POINTER */
1590 tcg_gen_mov_tl(cpu_cr27, cpu_gr[26]);
1591 tcg_gen_mov_tl(cpu_iaoq_f, cpu_gr[31]);
1592 tcg_gen_addi_tl(cpu_iaoq_b, cpu_iaoq_f, 4);
1593 return DISAS_IAQ_N_UPDATED;
1595 case 0x100: /* SYSCALL */
1596 gen_excp_1(EXCP_SYSCALL);
1597 return DISAS_NORETURN;
/* Any other offset within the gateway page is an illegal entry. */
1601 gen_excp_1(EXCP_SIGILL);
1602 return DISAS_NORETURN;
/* No-operation: only consume any pending nullification condition. */
1606 static DisasJumpType trans_nop(DisasContext *ctx, uint32_t insn,
1607 const DisasInsn *di)
1609 cond_free(&ctx->null_cond);
/* BREAK: raise a debug exception (nullifiable). */
1613 static DisasJumpType trans_break(DisasContext *ctx, uint32_t insn,
1614 const DisasInsn *di)
1617 return nullify_end(ctx, gen_excp(ctx, EXCP_DEBUG));
/* SYNC: emit a full sequentially-consistent memory barrier. */
1620 static DisasJumpType trans_sync(DisasContext *ctx, uint32_t insn,
1621 const DisasInsn *di)
1623 /* No point in nullifying the memory barrier. */
1624 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1626 cond_free(&ctx->null_cond);
1630 static DisasJumpType trans_mfia(DisasContext *ctx, uint32_t insn,
1631 const DisasInsn *di)
1633 unsigned rt = extract32(insn, 0, 5);
1634 TCGv tmp = dest_gpr(ctx, rt);
1635 tcg_gen_movi_tl(tmp, ctx->iaoq_f);
1636 save_gpr(ctx, rt, tmp);
1638 cond_free(&ctx->null_cond);
/* MFSP: space registers are not implemented, so always read zero. */
1642 static DisasJumpType trans_mfsp(DisasContext *ctx, uint32_t insn,
1643 const DisasInsn *di)
1645 unsigned rt = extract32(insn, 0, 5);
1646 TCGv tmp = dest_gpr(ctx, rt);
1648 /* ??? We don't implement space registers. */
1649 tcg_gen_movi_tl(tmp, 0);
1650 save_gpr(ctx, rt, tmp);
1652 cond_free(&ctx->null_cond);
/* MFCTL: read a control register. Only SAR (cr11), the interval timer
   (cr16, stubbed to 0), cr26 and cr27 are accessible from user mode;
   everything else is privileged and raises an illegal instruction. */
1656 static DisasJumpType trans_mfctl(DisasContext *ctx, uint32_t insn,
1657 const DisasInsn *di)
1659 unsigned rt = extract32(insn, 0, 5);
1660 unsigned ctl = extract32(insn, 21, 5);
1665 #ifdef TARGET_HPPA64
1666 if (extract32(insn, 14, 1) == 0) {
1667 /* MFSAR without ,W masks low 5 bits. */
1668 tmp = dest_gpr(ctx, rt);
1669 tcg_gen_andi_tl(tmp, cpu_sar, 31);
1670 save_gpr(ctx, rt, tmp);
1674 save_gpr(ctx, rt, cpu_sar);
1676 case 16: /* Interval Timer */
1677 tmp = dest_gpr(ctx, rt);
1678 tcg_gen_movi_tl(tmp, 0); /* FIXME */
1679 save_gpr(ctx, rt, tmp);
1682 save_gpr(ctx, rt, cpu_cr26);
1685 save_gpr(ctx, rt, cpu_cr27);
1688 /* All other control registers are privileged. */
1689 return gen_illegal(ctx);
1692 cond_free(&ctx->null_cond);
/* MTCTL: write a control register. Only SAR (cr11) is writable here;
   the value is masked to a valid shift amount. */
1696 static DisasJumpType trans_mtctl(DisasContext *ctx, uint32_t insn,
1697 const DisasInsn *di)
1699 unsigned rin = extract32(insn, 16, 5);
1700 unsigned ctl = extract32(insn, 21, 5);
1703 if (ctl == 11) { /* SAR */
1704 tmp = tcg_temp_new();
1705 tcg_gen_andi_tl(tmp, load_gpr(ctx, rin), TARGET_LONG_BITS - 1);
1706 save_or_nullify(ctx, cpu_sar, tmp);
1709 /* All other control registers are privileged or read-only. */
1710 return gen_illegal(ctx);
1713 cond_free(&ctx->null_cond);
/* MTSARCM: write the bitwise complement of RIN (masked to a valid
   shift amount) into SAR. */
1717 static DisasJumpType trans_mtsarcm(DisasContext *ctx, uint32_t insn,
1718 const DisasInsn *di)
1720 unsigned rin = extract32(insn, 16, 5);
1721 TCGv tmp = tcg_temp_new();
1723 tcg_gen_not_tl(tmp, load_gpr(ctx, rin));
1724 tcg_gen_andi_tl(tmp, tmp, TARGET_LONG_BITS - 1);
1725 save_or_nullify(ctx, cpu_sar, tmp);
1728 cond_free(&ctx->null_cond);
/* LDSID: load a space identifier; always zero without space registers. */
1732 static DisasJumpType trans_ldsid(DisasContext *ctx, uint32_t insn,
1733 const DisasInsn *di)
1735 unsigned rt = extract32(insn, 0, 5);
1736 TCGv dest = dest_gpr(ctx, rt);
1738 /* Since we don't implement space registers, this returns zero. */
1739 tcg_gen_movi_tl(dest, 0);
1740 save_gpr(ctx, rt, dest);
1742 cond_free(&ctx->null_cond);
/* Decode table for system-control instructions: {match, mask, handler}.
   An insn matches an entry when (insn & mask) == match. */
1746 static const DisasInsn table_system[] = {
1747 { 0x00000000u, 0xfc001fe0u, trans_break },
1748 /* We don't implement space register, so MTSP is a nop. */
1749 { 0x00001820u, 0xffe01fffu, trans_nop },
1750 { 0x00001840u, 0xfc00ffffu, trans_mtctl },
1751 { 0x016018c0u, 0xffe0ffffu, trans_mtsarcm },
1752 { 0x000014a0u, 0xffffffe0u, trans_mfia },
1753 { 0x000004a0u, 0xffff1fe0u, trans_mfsp },
1754 { 0x000008a0u, 0xfc1fffe0u, trans_mfctl },
1755 { 0x00000400u, 0xffffffffu, trans_sync },
1756 { 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
/* Cache-management insns with base modification: the flush itself is a
   no-op; only perform the base-register update RB += RX. */
1759 static DisasJumpType trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
1760 const DisasInsn *di)
1762 unsigned rb = extract32(insn, 21, 5);
1763 unsigned rx = extract32(insn, 16, 5);
1764 TCGv dest = dest_gpr(ctx, rb);
1765 TCGv src1 = load_gpr(ctx, rb);
1766 TCGv src2 = load_gpr(ctx, rx);
1768 /* The only thing we need to do is the base register modification. */
1769 tcg_gen_add_tl(dest, src1, src2);
1770 save_gpr(ctx, rb, dest);
1772 cond_free(&ctx->null_cond);
/* PROBE/PROBEI: test read or write access to the address in RB,
   depositing the result in RT via a helper. */
1776 static DisasJumpType trans_probe(DisasContext *ctx, uint32_t insn,
1777 const DisasInsn *di)
1779 unsigned rt = extract32(insn, 0, 5);
1780 unsigned rb = extract32(insn, 21, 5);
1781 unsigned is_write = extract32(insn, 6, 1);
1786 /* ??? Do something with priv level operand. */
1787 dest = dest_gpr(ctx, rt);
1789 gen_helper_probe_w(dest, load_gpr(ctx, rb));
1791 gen_helper_probe_r(dest, load_gpr(ctx, rb));
1793 save_gpr(ctx, rt, dest);
1794 return nullify_end(ctx, DISAS_NEXT);
/* Decode table for memory-management insns. Cache flushes are no-ops
   (optionally with base modification); probes use a helper. */
1797 static const DisasInsn table_mem_mgmt[] = {
1798 { 0x04003280u, 0xfc003fffu, trans_nop }, /* fdc, disp */
1799 { 0x04001280u, 0xfc003fffu, trans_nop }, /* fdc, index */
1800 { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
1801 { 0x040012c0u, 0xfc003fffu, trans_nop }, /* fdce */
1802 { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
1803 { 0x04000280u, 0xfc001fffu, trans_nop }, /* fic 0a */
1804 { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
1805 { 0x040013c0u, 0xfc003fffu, trans_nop }, /* fic 4f */
1806 { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
1807 { 0x040002c0u, 0xfc001fffu, trans_nop }, /* fice */
1808 { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
1809 { 0x04002700u, 0xfc003fffu, trans_nop }, /* pdc */
1810 { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
1811 { 0x04001180u, 0xfc003fa0u, trans_probe }, /* probe */
1812 { 0x04003180u, 0xfc003fa0u, trans_probe }, /* probei */
/* ADD and SHLADD family: decode the ext field into the logical (,L),
   trap-on-overflow (,TSV), and carry-in (,C) variants, then defer to
   do_add for the actual computation and condition generation. */
1815 static DisasJumpType trans_add(DisasContext *ctx, uint32_t insn,
1816 const DisasInsn *di)
1818 unsigned r2 = extract32(insn, 21, 5);
1819 unsigned r1 = extract32(insn, 16, 5);
1820 unsigned cf = extract32(insn, 12, 4);
1821 unsigned ext = extract32(insn, 8, 4);
1822 unsigned shift = extract32(insn, 6, 2);
1823 unsigned rt = extract32(insn, 0, 5);
1824 TCGv tcg_r1, tcg_r2;
1828 bool is_tsv = false;
1832 case 0x6: /* ADD, SHLADD */
1834 case 0xa: /* ADD,L, SHLADD,L */
1837 case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
1840 case 0x7: /* ADD,C */
1843 case 0xf: /* ADD,C,TSV */
1844 is_c = is_tsv = true;
1847 return gen_illegal(ctx);
1853 tcg_r1 = load_gpr(ctx, r1);
1854 tcg_r2 = load_gpr(ctx, r2);
1855 ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
1856 return nullify_end(ctx, ret);
/* SUB family: decode ext into the borrow (,B), trap-on-overflow (,TSV)
   and trap-on-condition (,TC) variants, then defer to do_sub. */
1859 static DisasJumpType trans_sub(DisasContext *ctx, uint32_t insn,
1860 const DisasInsn *di)
1862 unsigned r2 = extract32(insn, 21, 5);
1863 unsigned r1 = extract32(insn, 16, 5);
1864 unsigned cf = extract32(insn, 12, 4);
1865 unsigned ext = extract32(insn, 6, 6);
1866 unsigned rt = extract32(insn, 0, 5);
1867 TCGv tcg_r1, tcg_r2;
1870 bool is_tsv = false;
1874 case 0x10: /* SUB */
1876 case 0x30: /* SUB,TSV */
1879 case 0x14: /* SUB,B */
1882 case 0x34: /* SUB,B,TSV */
1883 is_b = is_tsv = true;
1885 case 0x13: /* SUB,TC */
1888 case 0x33: /* SUB,TSV,TC */
1889 is_tc = is_tsv = true;
1892 return gen_illegal(ctx);
1898 tcg_r1 = load_gpr(ctx, r1);
1899 tcg_r2 = load_gpr(ctx, r2);
1900 ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
1901 return nullify_end(ctx, ret);
/* Logical ops (AND/ANDCM/OR/XOR): the specific TCG op comes from the
   decode-table entry's f.ttt callback; do_log handles conditions. */
1904 static DisasJumpType trans_log(DisasContext *ctx, uint32_t insn,
1905 const DisasInsn *di)
1907 unsigned r2 = extract32(insn, 21, 5);
1908 unsigned r1 = extract32(insn, 16, 5);
1909 unsigned cf = extract32(insn, 12, 4);
1910 unsigned rt = extract32(insn, 0, 5);
1911 TCGv tcg_r1, tcg_r2;
1917 tcg_r1 = load_gpr(ctx, r1);
1918 tcg_r2 = load_gpr(ctx, r2);
1919 ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f.ttt);
1920 return nullify_end(ctx, ret);
1923 /* OR r,0,t -> COPY (according to gas) */
1924 static DisasJumpType trans_copy(DisasContext *ctx, uint32_t insn,
1925 const DisasInsn *di)
1927 unsigned r1 = extract32(insn, 16, 5);
1928 unsigned rt = extract32(insn, 0, 5);
/* r1 == 0 reads as the constant zero; otherwise copy the register. */
1931 TCGv dest = dest_gpr(ctx, rt);
1932 tcg_gen_movi_tl(dest, 0);
1933 save_gpr(ctx, rt, dest);
1935 save_gpr(ctx, rt, cpu_gr[r1]);
1937 cond_free(&ctx->null_cond);
/* CMPCLR: compare R1 with R2, clear RT; condition comes from do_cmpclr. */
1941 static DisasJumpType trans_cmpclr(DisasContext *ctx, uint32_t insn,
1942 const DisasInsn *di)
1944 unsigned r2 = extract32(insn, 21, 5);
1945 unsigned r1 = extract32(insn, 16, 5);
1946 unsigned cf = extract32(insn, 12, 4);
1947 unsigned rt = extract32(insn, 0, 5);
1948 TCGv tcg_r1, tcg_r2;
1954 tcg_r1 = load_gpr(ctx, r1);
1955 tcg_r2 = load_gpr(ctx, r2);
1956 ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
1957 return nullify_end(ctx, ret);
/* UXOR: unit XOR — handled by do_unit with the xor TCG op. */
1960 static DisasJumpType trans_uxor(DisasContext *ctx, uint32_t insn,
1961 const DisasInsn *di)
1963 unsigned r2 = extract32(insn, 21, 5);
1964 unsigned r1 = extract32(insn, 16, 5);
1965 unsigned cf = extract32(insn, 12, 4);
1966 unsigned rt = extract32(insn, 0, 5);
1967 TCGv tcg_r1, tcg_r2;
1973 tcg_r1 = load_gpr(ctx, r1);
1974 tcg_r2 = load_gpr(ctx, r2);
1975 ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_tl);
1976 return nullify_end(ctx, ret);
/* UADDCM[,TC]: unit add complement — computes R1 + ~R2 via do_unit. */
1979 static DisasJumpType trans_uaddcm(DisasContext *ctx, uint32_t insn,
1980 const DisasInsn *di)
1982 unsigned r2 = extract32(insn, 21, 5);
1983 unsigned r1 = extract32(insn, 16, 5);
1984 unsigned cf = extract32(insn, 12, 4);
1985 unsigned is_tc = extract32(insn, 6, 1);
1986 unsigned rt = extract32(insn, 0, 5);
1987 TCGv tcg_r1, tcg_r2, tmp;
1993 tcg_r1 = load_gpr(ctx, r1);
1994 tcg_r2 = load_gpr(ctx, r2);
1995 tmp = get_temp(ctx);
1996 tcg_gen_not_tl(tmp, tcg_r2);
1997 ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_tl);
1998 return nullify_end(ctx, ret);
/* DCOR / IDCOR: decimal correction. Build a per-nibble correction mask
   from the carry bits in PSW[CB] (0x6 per nibble lacking a carry) and
   add or subtract it from R2 via do_unit. */
2001 static DisasJumpType trans_dcor(DisasContext *ctx, uint32_t insn,
2002 const DisasInsn *di)
2004 unsigned r2 = extract32(insn, 21, 5);
2005 unsigned cf = extract32(insn, 12, 4);
2006 unsigned is_i = extract32(insn, 6, 1);
2007 unsigned rt = extract32(insn, 0, 5);
2013 tmp = get_temp(ctx);
2014 tcg_gen_shri_tl(tmp, cpu_psw_cb, 3);
2016 tcg_gen_not_tl(tmp, tmp);
2018 tcg_gen_andi_tl(tmp, tmp, 0x11111111);
2019 tcg_gen_muli_tl(tmp, tmp, 6);
2020 ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
2021 is_i ? tcg_gen_add_tl : tcg_gen_sub_tl);
2023 return nullify_end(ctx, ret);
/* DS: divide step. Performs one shift-and-conditionally-subtract step
   of the non-restoring division algorithm, updating PSW[CB] and PSW[V]
   for the following step. */
2026 static DisasJumpType trans_ds(DisasContext *ctx, uint32_t insn,
2027 const DisasInsn *di)
2029 unsigned r2 = extract32(insn, 21, 5);
2030 unsigned r1 = extract32(insn, 16, 5);
2031 unsigned cf = extract32(insn, 12, 4);
2032 unsigned rt = extract32(insn, 0, 5);
2033 TCGv dest, add1, add2, addc, zero, in1, in2;
2037 in1 = load_gpr(ctx, r1);
2038 in2 = load_gpr(ctx, r2);
2040 add1 = tcg_temp_new();
2041 add2 = tcg_temp_new();
2042 addc = tcg_temp_new();
2043 dest = tcg_temp_new();
2044 zero = tcg_const_tl(0);
2046 /* Form R1 << 1 | PSW[CB]{8}. */
2047 tcg_gen_add_tl(add1, in1, in1);
2048 tcg_gen_add_tl(add1, add1, cpu_psw_cb_msb);
2050 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2051 carry{8} requires that we subtract via + ~R2 + 1, as described in
2052 the manual. By extracting and masking V, we can produce the
2053 proper inputs to the addition without movcond. */
2054 tcg_gen_sari_tl(addc, cpu_psw_v, TARGET_LONG_BITS - 1);
2055 tcg_gen_xor_tl(add2, in2, addc);
2056 tcg_gen_andi_tl(addc, addc, 1);
2057 /* ??? This is only correct for 32-bit. */
2058 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2059 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2061 tcg_temp_free(addc);
2062 tcg_temp_free(zero);
2064 /* Write back the result register. */
2065 save_gpr(ctx, rt, dest);
2067 /* Write back PSW[CB]. */
2068 tcg_gen_xor_tl(cpu_psw_cb, add1, add2);
2069 tcg_gen_xor_tl(cpu_psw_cb, cpu_psw_cb, dest);
2071 /* Write back PSW[V] for the division step. */
2072 tcg_gen_neg_tl(cpu_psw_v, cpu_psw_cb_msb);
2073 tcg_gen_xor_tl(cpu_psw_v, cpu_psw_v, in2);
2075 /* Install the new nullification. */
2080 /* ??? The lshift is supposed to contribute to overflow. */
2081 sv = do_add_sv(ctx, dest, add1, add2);
2083 ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
2086 tcg_temp_free(add1);
2087 tcg_temp_free(add2);
2088 tcg_temp_free(dest);
2090 return nullify_end(ctx, DISAS_NEXT);
/* Decode table for arithmetic/logical insns. More specific masks must
   appear before more general ones (e.g. the nop/copy forms of OR). */
2093 static const DisasInsn table_arith_log[] = {
2094 { 0x08000240u, 0xfc00ffffu, trans_nop }, /* or x,y,0 */
2095 { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
2096 { 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_tl },
2097 { 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_tl },
2098 { 0x08000240u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_or_tl },
2099 { 0x08000280u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_xor_tl },
2100 { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
2101 { 0x08000380u, 0xfc000fe0u, trans_uxor },
2102 { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
2103 { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
2104 { 0x08000440u, 0xfc000fe0u, trans_ds },
2105 { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
2106 { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
2107 { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
2108 { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
/* ADDI / ADDIO / ADDIT / ADDITO: add an 11-bit sign-extended immediate
   to R2; e1 selects trap-on-overflow, o1 selects trap-on-condition. */
2111 static DisasJumpType trans_addi(DisasContext *ctx, uint32_t insn)
2113 target_long im = low_sextract(insn, 0, 11);
2114 unsigned e1 = extract32(insn, 11, 1);
2115 unsigned cf = extract32(insn, 12, 4);
2116 unsigned rt = extract32(insn, 16, 5);
2117 unsigned r2 = extract32(insn, 21, 5);
2118 unsigned o1 = extract32(insn, 26, 1);
2119 TCGv tcg_im, tcg_r2;
2126 tcg_im = load_const(ctx, im);
2127 tcg_r2 = load_gpr(ctx, r2);
2128 ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
2130 return nullify_end(ctx, ret);
/* SUBI / SUBIO: subtract R2 from an 11-bit sign-extended immediate;
   e1 selects trap-on-overflow. */
2133 static DisasJumpType trans_subi(DisasContext *ctx, uint32_t insn)
2135 target_long im = low_sextract(insn, 0, 11);
2136 unsigned e1 = extract32(insn, 11, 1);
2137 unsigned cf = extract32(insn, 12, 4);
2138 unsigned rt = extract32(insn, 16, 5);
2139 unsigned r2 = extract32(insn, 21, 5);
2140 TCGv tcg_im, tcg_r2;
2147 tcg_im = load_const(ctx, im);
2148 tcg_r2 = load_gpr(ctx, r2);
2149 ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
2151 return nullify_end(ctx, ret);
/* CMPICLR: compare immediate with R2 and clear RT, via do_cmpclr. */
2154 static DisasJumpType trans_cmpiclr(DisasContext *ctx, uint32_t insn)
2156 target_long im = low_sextract(insn, 0, 11);
2157 unsigned cf = extract32(insn, 12, 4);
2158 unsigned rt = extract32(insn, 16, 5);
2159 unsigned r2 = extract32(insn, 21, 5);
2160 TCGv tcg_im, tcg_r2;
2167 tcg_im = load_const(ctx, im);
2168 tcg_r2 = load_gpr(ctx, r2);
2169 ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
2171 return nullify_end(ctx, ret);
/* LD[BHWD] short-displacement form: modify < 0 means pre-modify (,MB),
   > 0 post-modify (,MA), 0 no base update. */
2174 static DisasJumpType trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
2175 const DisasInsn *di)
2177 unsigned rt = extract32(insn, 0, 5);
2178 unsigned m = extract32(insn, 5, 1);
2179 unsigned sz = extract32(insn, 6, 2);
2180 unsigned a = extract32(insn, 13, 1);
2181 int disp = low_sextract(insn, 16, 5);
2182 unsigned rb = extract32(insn, 21, 5);
2183 int modify = (m ? (a ? -1 : 1) : 0);
2184 TCGMemOp mop = MO_TE | sz;
2186 return do_load(ctx, rt, rb, 0, 0, disp, modify, mop);
/* LD[BHWD] indexed form: u selects scaled index (by the access size). */
2189 static DisasJumpType trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
2190 const DisasInsn *di)
2192 unsigned rt = extract32(insn, 0, 5);
2193 unsigned m = extract32(insn, 5, 1);
2194 unsigned sz = extract32(insn, 6, 2);
2195 unsigned u = extract32(insn, 13, 1);
2196 unsigned rx = extract32(insn, 16, 5);
2197 unsigned rb = extract32(insn, 21, 5);
2198 TCGMemOp mop = MO_TE | sz;
2200 return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, m, mop);
/* ST[BHWD] short-displacement form; same modify encoding as the load. */
2203 static DisasJumpType trans_st_idx_i(DisasContext *ctx, uint32_t insn,
2204 const DisasInsn *di)
2206 int disp = low_sextract(insn, 0, 5);
2207 unsigned m = extract32(insn, 5, 1);
2208 unsigned sz = extract32(insn, 6, 2);
2209 unsigned a = extract32(insn, 13, 1);
2210 unsigned rr = extract32(insn, 16, 5);
2211 unsigned rb = extract32(insn, 21, 5);
2212 int modify = (m ? (a ? -1 : 1) : 0);
2213 TCGMemOp mop = MO_TE | sz;
2215 return do_store(ctx, rr, rb, disp, modify, mop);
/* LDCW: load and clear word, the HPPA atomic primitive. Implemented as
   an aligned atomic exchange with zero. The i bit selects the immediate
   vs indexed addressing form. */
2218 static DisasJumpType trans_ldcw(DisasContext *ctx, uint32_t insn,
2219 const DisasInsn *di)
2221 unsigned rt = extract32(insn, 0, 5);
2222 unsigned m = extract32(insn, 5, 1);
2223 unsigned i = extract32(insn, 12, 1);
2224 unsigned au = extract32(insn, 13, 1);
2225 unsigned rx = extract32(insn, 16, 5);
2226 unsigned rb = extract32(insn, 21, 5);
2227 TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
2228 TCGv zero, addr, base, dest;
2229 int modify, disp = 0, scale = 0;
2233 /* ??? Share more code with do_load and do_load_{32,64}. */
/* Immediate form: RX field holds a 5-bit signed displacement. */
2236 modify = (m ? (au ? -1 : 1) : 0);
2237 disp = low_sextract(rx, 0, 5);
2242 scale = mop & MO_SIZE;
2246 /* Base register modification. Make sure if RT == RB, we see
2247 the result of the load. */
2248 dest = get_temp(ctx);
2250 dest = dest_gpr(ctx, rt);
2253 addr = tcg_temp_new();
2254 base = load_gpr(ctx, rb);
2256 tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
2257 tcg_gen_add_tl(addr, addr, base);
2259 tcg_gen_addi_tl(addr, base, disp);
/* Atomically read the word and store zero in its place. */
2262 zero = tcg_const_tl(0);
2263 tcg_gen_atomic_xchg_tl(dest, (modify <= 0 ? addr : base),
2264 zero, MMU_USER_IDX, mop);
2266 save_gpr(ctx, rb, addr);
2268 save_gpr(ctx, rt, dest);
2270 return nullify_end(ctx, DISAS_NEXT);
/* STBY: store bytes — writes the leading (,B) or trailing (,E) bytes
   of RT up to/from the word boundary of the (possibly unaligned)
   address, via helpers. With modification the base is word-aligned. */
2273 static DisasJumpType trans_stby(DisasContext *ctx, uint32_t insn,
2274 const DisasInsn *di)
2276 target_long disp = low_sextract(insn, 0, 5);
2277 unsigned m = extract32(insn, 5, 1);
2278 unsigned a = extract32(insn, 13, 1);
2279 unsigned rt = extract32(insn, 16, 5);
2280 unsigned rb = extract32(insn, 21, 5);
2285 addr = tcg_temp_new();
2286 if (m || disp == 0) {
2287 tcg_gen_mov_tl(addr, load_gpr(ctx, rb));
2289 tcg_gen_addi_tl(addr, load_gpr(ctx, rb), disp);
2291 val = load_gpr(ctx, rt);
2294 gen_helper_stby_e(cpu_env, addr, val);
2296 gen_helper_stby_b(cpu_env, addr, val);
/* Base modification: advance by disp and align down to a word. */
2300 tcg_gen_addi_tl(addr, addr, disp);
2301 tcg_gen_andi_tl(addr, addr, ~3);
2302 save_gpr(ctx, rb, addr);
2304 tcg_temp_free(addr);
2306 return nullify_end(ctx, DISAS_NEXT);
/* Decode table for the indexed/short-displacement memory insns. */
2309 static const DisasInsn table_index_mem[] = {
2310 { 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
2311 { 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
2312 { 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
2313 { 0x0c0001c0u, 0xfc0003c0, trans_ldcw },
2314 { 0x0c001300u, 0xfc0013c0, trans_stby },
/* LDIL: load the assembled 21-bit immediate (left-shifted) into RT. */
2317 static DisasJumpType trans_ldil(DisasContext *ctx, uint32_t insn)
2319 unsigned rt = extract32(insn, 21, 5);
2320 target_long i = assemble_21(insn);
2321 TCGv tcg_rt = dest_gpr(ctx, rt);
2323 tcg_gen_movi_tl(tcg_rt, i);
2324 save_gpr(ctx, rt, tcg_rt);
2325 cond_free(&ctx->null_cond);
/* ADDIL: add the assembled 21-bit immediate to RT; result goes to GR1. */
2330 static DisasJumpType trans_addil(DisasContext *ctx, uint32_t insn)
2332 unsigned rt = extract32(insn, 21, 5);
2333 target_long i = assemble_21(insn);
2334 TCGv tcg_rt = load_gpr(ctx, rt);
2335 TCGv tcg_r1 = dest_gpr(ctx, 1);
2337 tcg_gen_addi_tl(tcg_r1, tcg_rt, i);
2338 save_gpr(ctx, 1, tcg_r1);
2339 cond_free(&ctx->null_cond);
/* LDO: load offset, RT = RB + imm16; covers the LDI and COPY pseudos. */
2344 static DisasJumpType trans_ldo(DisasContext *ctx, uint32_t insn)
2346 unsigned rb = extract32(insn, 21, 5);
2347 unsigned rt = extract32(insn, 16, 5);
2348 target_long i = assemble_16(insn);
2349 TCGv tcg_rt = dest_gpr(ctx, rt);
2351 /* Special case rb == 0, for the LDI pseudo-op.
2352 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
2354 tcg_gen_movi_tl(tcg_rt, i);
2356 tcg_gen_addi_tl(tcg_rt, cpu_gr[rb], i);
2358 save_gpr(ctx, rt, tcg_rt);
2359 cond_free(&ctx->null_cond);
/* Long-displacement load: when modifying, the sign of the displacement
   selects pre-decrement (-1) vs post-increment (+1) base update. */
2364 static DisasJumpType trans_load(DisasContext *ctx, uint32_t insn,
2365 bool is_mod, TCGMemOp mop)
2367 unsigned rb = extract32(insn, 21, 5);
2368 unsigned rt = extract32(insn, 16, 5);
2369 target_long i = assemble_16(insn);
2371 return do_load(ctx, rt, rb, 0, 0, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
/* Shared opcode: either FLDW (no modification) or LDW with base
   modification, distinguished by the ext2 field. */
2374 static DisasJumpType trans_load_w(DisasContext *ctx, uint32_t insn)
2376 unsigned rb = extract32(insn, 21, 5);
2377 unsigned rt = extract32(insn, 16, 5);
2378 target_long i = assemble_16a(insn);
2379 unsigned ext2 = extract32(insn, 1, 2);
2384 /* FLDW without modification. */
2385 return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
2387 /* LDW with modification. Note that the sign of I selects
2388 post-dec vs pre-inc. */
2389 return do_load(ctx, rt, rb, 0, 0, i, (i < 0 ? 1 : -1), MO_TEUL);
2391 return gen_illegal(ctx);
/* FLDW with base modification; a selects pre- vs post-modify. */
2395 static DisasJumpType trans_fload_mod(DisasContext *ctx, uint32_t insn)
2397 target_long i = assemble_16a(insn);
2398 unsigned t1 = extract32(insn, 1, 1);
2399 unsigned a = extract32(insn, 2, 1);
2400 unsigned t0 = extract32(insn, 16, 5);
2401 unsigned rb = extract32(insn, 21, 5);
2403 /* FLDW with modification. */
2404 return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
/* Long-displacement store; modify encoding mirrors trans_load. */
2407 static DisasJumpType trans_store(DisasContext *ctx, uint32_t insn,
2408 bool is_mod, TCGMemOp mop)
2410 unsigned rb = extract32(insn, 21, 5);
2411 unsigned rt = extract32(insn, 16, 5);
2412 target_long i = assemble_16(insn);
2414 return do_store(ctx, rt, rb, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
/* Shared opcode: either FSTW (no modification) or STW with base
   modification, distinguished by the ext2 field. */
2417 static DisasJumpType trans_store_w(DisasContext *ctx, uint32_t insn)
2419 unsigned rb = extract32(insn, 21, 5);
2420 unsigned rt = extract32(insn, 16, 5);
2421 target_long i = assemble_16a(insn);
2422 unsigned ext2 = extract32(insn, 1, 2);
2427 /* FSTW without modification. */
2428 return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
2430 /* STW with modification. */
2431 return do_store(ctx, rt, rb, i, (i < 0 ? 1 : -1), MO_TEUL);
2433 return gen_illegal(ctx);
/* FSTW with base modification; a selects pre- vs post-modify. */
2437 static DisasJumpType trans_fstore_mod(DisasContext *ctx, uint32_t insn)
2439 target_long i = assemble_16a(insn);
2440 unsigned t1 = extract32(insn, 1, 1);
2441 unsigned a = extract32(insn, 2, 1);
2442 unsigned t0 = extract32(insn, 16, 5);
2443 unsigned rb = extract32(insn, 21, 5);
2445 /* FSTW with modification. */
2446 return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
/* Coprocessor single-word load/store (FLDW/FSTW, indexed and
   short-displacement forms); ext3 selects load vs store. */
2449 static DisasJumpType trans_copr_w(DisasContext *ctx, uint32_t insn)
2451 unsigned t0 = extract32(insn, 0, 5);
2452 unsigned m = extract32(insn, 5, 1);
2453 unsigned t1 = extract32(insn, 6, 1);
2454 unsigned ext3 = extract32(insn, 7, 3);
2455 /* unsigned cc = extract32(insn, 10, 2); */
2456 unsigned i = extract32(insn, 12, 1);
2457 unsigned ua = extract32(insn, 13, 1);
2458 unsigned rx = extract32(insn, 16, 5);
2459 unsigned rb = extract32(insn, 21, 5);
2460 unsigned rt = t1 * 32 + t0;
2461 int modify = (m ? (ua ? -1 : 1) : 0);
/* Indexed form: ua requests the index scaled by the word size. */
2465 scale = (ua ? 2 : 0);
/* Immediate form: RX field holds a 5-bit signed displacement. */
2469 disp = low_sextract(rx, 0, 5);
2472 modify = (m ? (ua ? -1 : 1) : 0);
2477 return do_floadw(ctx, rt, rb, rx, scale, disp, modify);
2479 return do_fstorew(ctx, rt, rb, rx, scale, disp, modify);
2481 return gen_illegal(ctx);
/* Coprocessor doubleword load/store (FLDD/FSTD); same structure as
   trans_copr_w but with an 8-byte access and ext4 selector. */
2484 static DisasJumpType trans_copr_dw(DisasContext *ctx, uint32_t insn)
2486 unsigned rt = extract32(insn, 0, 5);
2487 unsigned m = extract32(insn, 5, 1);
2488 unsigned ext4 = extract32(insn, 6, 4);
2489 /* unsigned cc = extract32(insn, 10, 2); */
2490 unsigned i = extract32(insn, 12, 1);
2491 unsigned ua = extract32(insn, 13, 1);
2492 unsigned rx = extract32(insn, 16, 5);
2493 unsigned rb = extract32(insn, 21, 5);
2494 int modify = (m ? (ua ? -1 : 1) : 0);
/* Indexed form: ua requests the index scaled by the doubleword size. */
2498 scale = (ua ? 3 : 0);
/* Immediate form: RX field holds a 5-bit signed displacement. */
2502 disp = low_sextract(rx, 0, 5);
2505 modify = (m ? (ua ? -1 : 1) : 0);
2510 return do_floadd(ctx, rt, rb, rx, scale, disp, modify);
2512 return do_fstored(ctx, rt, rb, rx, scale, disp, modify);
2514 return gen_illegal(ctx);
/* CMPB / CMPIB: compare (register or immediate against R) and branch.
   The comparison is performed as a subtraction so that do_sub_cond can
   derive the branch condition, then defers to do_cbranch. */
2518 static DisasJumpType trans_cmpb(DisasContext *ctx, uint32_t insn,
2519 bool is_true, bool is_imm, bool is_dw)
2521 target_long disp = assemble_12(insn) * 4;
2522 unsigned n = extract32(insn, 1, 1);
2523 unsigned c = extract32(insn, 13, 3);
2524 unsigned r = extract32(insn, 21, 5);
2525 unsigned cf = c * 2 + !is_true;
2526 TCGv dest, in1, in2, sv;
2532 in1 = load_const(ctx, low_sextract(insn, 16, 5));
2534 in1 = load_gpr(ctx, extract32(insn, 16, 5));
2536 in2 = load_gpr(ctx, r);
2537 dest = get_temp(ctx);
2539 tcg_gen_sub_tl(dest, in1, in2);
2543 sv = do_sub_sv(ctx, dest, in1, in2);
2546 cond = do_sub_cond(cf, dest, in1, in2, sv);
2547 return do_cbranch(ctx, disp, n, &cond);
/* ADDB / ADDIB: add (register or immediate) into R and branch on the
   result. Carry is tracked via add2 only when the condition needs it;
   overflow via do_add_sv. Result is written back to R. */
2550 static DisasJumpType trans_addb(DisasContext *ctx, uint32_t insn,
2551 bool is_true, bool is_imm)
2553 target_long disp = assemble_12(insn) * 4;
2554 unsigned n = extract32(insn, 1, 1);
2555 unsigned c = extract32(insn, 13, 3);
2556 unsigned r = extract32(insn, 21, 5);
2557 unsigned cf = c * 2 + !is_true;
2558 TCGv dest, in1, in2, sv, cb_msb;
2564 in1 = load_const(ctx, low_sextract(insn, 16, 5));
2566 in1 = load_gpr(ctx, extract32(insn, 16, 5));
2568 in2 = load_gpr(ctx, r);
2569 dest = dest_gpr(ctx, r);
2571 TCGV_UNUSED(cb_msb);
2575 tcg_gen_add_tl(dest, in1, in2);
/* Condition requires the carry-out: compute with a wide add. */
2578 cb_msb = get_temp(ctx);
2579 tcg_gen_movi_tl(cb_msb, 0);
2580 tcg_gen_add2_tl(dest, cb_msb, in1, cb_msb, in2, cb_msb);
/* Condition requires the overflow bit. */
2583 tcg_gen_add_tl(dest, in1, in2);
2584 sv = do_add_sv(ctx, dest, in1, in2);
2588 cond = do_cond(cf, dest, cb_msb, sv);
2589 return do_cbranch(ctx, disp, n, &cond);
/* BB: branch on bit. Shift the selected bit (fixed position P, or
   variable via SAR) into the sign position, then branch on its value. */
2592 static DisasJumpType trans_bb(DisasContext *ctx, uint32_t insn)
2594 target_long disp = assemble_12(insn) * 4;
2595 unsigned n = extract32(insn, 1, 1);
2596 unsigned c = extract32(insn, 15, 1);
2597 unsigned r = extract32(insn, 16, 5);
2598 unsigned p = extract32(insn, 21, 5);
2599 unsigned i = extract32(insn, 26, 1);
2605 tmp = tcg_temp_new();
2606 tcg_r = load_gpr(ctx, r);
2608 tcg_gen_shli_tl(tmp, tcg_r, p);
2610 tcg_gen_shl_tl(tmp, tcg_r, cpu_sar);
/* Bit is now the MSB: test its sign. c selects branch-on-0 vs -1. */
2613 cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
2615 return do_cbranch(ctx, disp, n, &cond);
/* MOVB / MOVIB: move (register or 5-bit immediate) into R, then branch
   on the moved value with a shift/extract-style condition. */
2618 static DisasJumpType trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
2620 target_long disp = assemble_12(insn) * 4;
2621 unsigned n = extract32(insn, 1, 1);
2622 unsigned c = extract32(insn, 13, 3);
2623 unsigned t = extract32(insn, 16, 5);
2624 unsigned r = extract32(insn, 21, 5);
2630 dest = dest_gpr(ctx, r);
2632 tcg_gen_movi_tl(dest, low_sextract(t, 0, 5));
2633 } else if (t == 0) {
2634 tcg_gen_movi_tl(dest, 0);
2636 tcg_gen_mov_tl(dest, cpu_gr[t]);
2639 cond = do_sed_cond(c, dest);
2640 return do_cbranch(ctx, disp, n, &cond);
/* SHRPW (variable): shift the 64-bit pair R1:R2 right by SAR. The
   r1==0 and r1==r2 special cases avoid building the full pair. */
2643 static DisasJumpType trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
2644 const DisasInsn *di)
2646 unsigned rt = extract32(insn, 0, 5);
2647 unsigned c = extract32(insn, 13, 3);
2648 unsigned r1 = extract32(insn, 16, 5);
2649 unsigned r2 = extract32(insn, 21, 5);
2656 dest = dest_gpr(ctx, rt);
/* r1 == 0: plain logical shift right of R2. */
2658 tcg_gen_ext32u_tl(dest, load_gpr(ctx, r2));
2659 tcg_gen_shr_tl(dest, dest, cpu_sar);
2660 } else if (r1 == r2) {
/* Identical halves: this is a 32-bit rotate right. */
2661 TCGv_i32 t32 = tcg_temp_new_i32();
2662 tcg_gen_trunc_tl_i32(t32, load_gpr(ctx, r2));
2663 tcg_gen_rotr_i32(t32, t32, cpu_sar);
2664 tcg_gen_extu_i32_tl(dest, t32);
2665 tcg_temp_free_i32(t32);
/* General case: concatenate R1:R2 into 64 bits and shift. */
2667 TCGv_i64 t = tcg_temp_new_i64();
2668 TCGv_i64 s = tcg_temp_new_i64();
2670 tcg_gen_concat_tl_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
2671 tcg_gen_extu_tl_i64(s, cpu_sar);
2672 tcg_gen_shr_i64(t, t, s);
2673 tcg_gen_trunc_i64_tl(dest, t);
2675 tcg_temp_free_i64(t);
2676 tcg_temp_free_i64(s);
2678 save_gpr(ctx, rt, dest);
2680 /* Install the new nullification. */
2681 cond_free(&ctx->null_cond);
2683 ctx->null_cond = do_sed_cond(c, dest);
2685 return nullify_end(ctx, DISAS_NEXT);
/* SHRPW (fixed shift amount sa = 31 - cpos), with the same r1 special
   cases as the variable form, using extract/deposit for the pair. */
2688 static DisasJumpType trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
2689 const DisasInsn *di)
2691 unsigned rt = extract32(insn, 0, 5);
2692 unsigned cpos = extract32(insn, 5, 5);
2693 unsigned c = extract32(insn, 13, 3);
2694 unsigned r1 = extract32(insn, 16, 5);
2695 unsigned r2 = extract32(insn, 21, 5);
2696 unsigned sa = 31 - cpos;
2703 dest = dest_gpr(ctx, rt);
2704 t2 = load_gpr(ctx, r2);
/* Identical halves: a 32-bit rotate right by sa. */
2706 TCGv_i32 t32 = tcg_temp_new_i32();
2707 tcg_gen_trunc_tl_i32(t32, t2);
2708 tcg_gen_rotri_i32(t32, t32, sa);
2709 tcg_gen_extu_i32_tl(dest, t32);
2710 tcg_temp_free_i32(t32);
2711 } else if (r1 == 0) {
2712 tcg_gen_extract_tl(dest, t2, sa, 32 - sa);
/* General case: low bits from R2, high bits deposited from R1. */
2714 TCGv t0 = tcg_temp_new();
2715 tcg_gen_extract_tl(t0, t2, sa, 32 - sa);
2716 tcg_gen_deposit_tl(dest, t0, cpu_gr[r1], 32 - sa, sa);
2719 save_gpr(ctx, rt, dest);
2721 /* Install the new nullification. */
2722 cond_free(&ctx->null_cond);
2724 ctx->null_cond = do_sed_cond(c, dest);
2726 return nullify_end(ctx, DISAS_NEXT);
/* EXTRW (variable): extract a LEN-bit field whose position is given by
   SAR, with optional sign extension (is_se). */
2729 static DisasJumpType trans_extrw_sar(DisasContext *ctx, uint32_t insn,
2730 const DisasInsn *di)
2732 unsigned clen = extract32(insn, 0, 5);
2733 unsigned is_se = extract32(insn, 10, 1);
2734 unsigned c = extract32(insn, 13, 3);
2735 unsigned rt = extract32(insn, 16, 5);
2736 unsigned rr = extract32(insn, 21, 5);
2737 unsigned len = 32 - clen;
2738 TCGv dest, src, tmp;
2744 dest = dest_gpr(ctx, rt);
2745 src = load_gpr(ctx, rr);
2746 tmp = tcg_temp_new();
2748 /* Recall that SAR is using big-endian bit numbering. */
2749 tcg_gen_xori_tl(tmp, cpu_sar, TARGET_LONG_BITS - 1);
2751 tcg_gen_sar_tl(dest, src, tmp);
2752 tcg_gen_sextract_tl(dest, dest, 0, len);
2754 tcg_gen_shr_tl(dest, src, tmp);
2755 tcg_gen_extract_tl(dest, dest, 0, len);
2758 save_gpr(ctx, rt, dest);
2760 /* Install the new nullification. */
2761 cond_free(&ctx->null_cond);
2763 ctx->null_cond = do_sed_cond(c, dest);
2765 return nullify_end(ctx, DISAS_NEXT);
2768 static DisasJumpType trans_extrw_imm(DisasContext *ctx, uint32_t insn,
2769 const DisasInsn *di)
2771 unsigned clen = extract32(insn, 0, 5);
2772 unsigned pos = extract32(insn, 5, 5);
2773 unsigned is_se = extract32(insn, 10, 1);
2774 unsigned c = extract32(insn, 13, 3);
2775 unsigned rt = extract32(insn, 16, 5);
2776 unsigned rr = extract32(insn, 21, 5);
2777 unsigned len = 32 - clen;
2778 unsigned cpos = 31 - pos;
2785 dest = dest_gpr(ctx, rt);
2786 src = load_gpr(ctx, rr);
2788 tcg_gen_sextract_tl(dest, src, cpos, len);
2790 tcg_gen_extract_tl(dest, src, cpos, len);
2792 save_gpr(ctx, rt, dest);
2794 /* Install the new nullification. */
2795 cond_free(&ctx->null_cond);
2797 ctx->null_cond = do_sed_cond(c, dest);
2799 return nullify_end(ctx, DISAS_NEXT);
2802 static const DisasInsn table_sh_ex[] = {
2803 { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
2804 { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
2805 { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
2806 { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
2809 static DisasJumpType trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
2810 const DisasInsn *di)
2812 unsigned clen = extract32(insn, 0, 5);
2813 unsigned cpos = extract32(insn, 5, 5);
2814 unsigned nz = extract32(insn, 10, 1);
2815 unsigned c = extract32(insn, 13, 3);
2816 target_long val = low_sextract(insn, 16, 5);
2817 unsigned rt = extract32(insn, 21, 5);
2818 unsigned len = 32 - clen;
2819 target_long mask0, mask1;
2825 if (cpos + len > 32) {
2829 dest = dest_gpr(ctx, rt);
2830 mask0 = deposit64(0, cpos, len, val);
2831 mask1 = deposit64(-1, cpos, len, val);
2834 TCGv src = load_gpr(ctx, rt);
2836 tcg_gen_andi_tl(dest, src, mask1);
2839 tcg_gen_ori_tl(dest, src, mask0);
2841 tcg_gen_movi_tl(dest, mask0);
2843 save_gpr(ctx, rt, dest);
2845 /* Install the new nullification. */
2846 cond_free(&ctx->null_cond);
2848 ctx->null_cond = do_sed_cond(c, dest);
2850 return nullify_end(ctx, DISAS_NEXT);
2853 static DisasJumpType trans_depw_imm(DisasContext *ctx, uint32_t insn,
2854 const DisasInsn *di)
2856 unsigned clen = extract32(insn, 0, 5);
2857 unsigned cpos = extract32(insn, 5, 5);
2858 unsigned nz = extract32(insn, 10, 1);
2859 unsigned c = extract32(insn, 13, 3);
2860 unsigned rr = extract32(insn, 16, 5);
2861 unsigned rt = extract32(insn, 21, 5);
2862 unsigned rs = nz ? rt : 0;
2863 unsigned len = 32 - clen;
2869 if (cpos + len > 32) {
2873 dest = dest_gpr(ctx, rt);
2874 val = load_gpr(ctx, rr);
2876 tcg_gen_deposit_z_tl(dest, val, cpos, len);
2878 tcg_gen_deposit_tl(dest, cpu_gr[rs], val, cpos, len);
2880 save_gpr(ctx, rt, dest);
2882 /* Install the new nullification. */
2883 cond_free(&ctx->null_cond);
2885 ctx->null_cond = do_sed_cond(c, dest);
2887 return nullify_end(ctx, DISAS_NEXT);
2890 static DisasJumpType trans_depw_sar(DisasContext *ctx, uint32_t insn,
2891 const DisasInsn *di)
2893 unsigned clen = extract32(insn, 0, 5);
2894 unsigned nz = extract32(insn, 10, 1);
2895 unsigned i = extract32(insn, 12, 1);
2896 unsigned c = extract32(insn, 13, 3);
2897 unsigned rt = extract32(insn, 21, 5);
2898 unsigned rs = nz ? rt : 0;
2899 unsigned len = 32 - clen;
2900 TCGv val, mask, tmp, shift, dest;
2901 unsigned msb = 1U << (len - 1);
2908 val = load_const(ctx, low_sextract(insn, 16, 5));
2910 val = load_gpr(ctx, extract32(insn, 16, 5));
2912 dest = dest_gpr(ctx, rt);
2913 shift = tcg_temp_new();
2914 tmp = tcg_temp_new();
2916 /* Convert big-endian bit numbering in SAR to left-shift. */
2917 tcg_gen_xori_tl(shift, cpu_sar, TARGET_LONG_BITS - 1);
2919 mask = tcg_const_tl(msb + (msb - 1));
2920 tcg_gen_and_tl(tmp, val, mask);
2922 tcg_gen_shl_tl(mask, mask, shift);
2923 tcg_gen_shl_tl(tmp, tmp, shift);
2924 tcg_gen_andc_tl(dest, cpu_gr[rs], mask);
2925 tcg_gen_or_tl(dest, dest, tmp);
2927 tcg_gen_shl_tl(dest, tmp, shift);
2929 tcg_temp_free(shift);
2930 tcg_temp_free(mask);
2932 save_gpr(ctx, rt, dest);
2934 /* Install the new nullification. */
2935 cond_free(&ctx->null_cond);
2937 ctx->null_cond = do_sed_cond(c, dest);
2939 return nullify_end(ctx, DISAS_NEXT);
2942 static const DisasInsn table_depw[] = {
2943 { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
2944 { 0xd4000800u, 0xfc001800u, trans_depw_imm },
2945 { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
2948 static DisasJumpType trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
2950 unsigned n = extract32(insn, 1, 1);
2951 unsigned b = extract32(insn, 21, 5);
2952 target_long disp = assemble_17(insn);
2954 /* unsigned s = low_uextract(insn, 13, 3); */
2955 /* ??? It seems like there should be a good way of using
2956 "be disp(sr2, r0)", the canonical gateway entry mechanism
2957 to our advantage. But that appears to be inconvenient to
2958 manage along side branch delay slots. Therefore we handle
2959 entry into the gateway page via absolute address. */
2961 /* Since we don't implement spaces, just branch. Do notice the special
2962 case of "be disp(*,r0)" using a direct branch to disp, so that we can
2963 goto_tb to the TB containing the syscall. */
2965 return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
2967 TCGv tmp = get_temp(ctx);
2968 tcg_gen_addi_tl(tmp, load_gpr(ctx, b), disp);
2969 return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
2973 static DisasJumpType trans_bl(DisasContext *ctx, uint32_t insn,
2974 const DisasInsn *di)
2976 unsigned n = extract32(insn, 1, 1);
2977 unsigned link = extract32(insn, 21, 5);
2978 target_long disp = assemble_17(insn);
2980 return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
2983 static DisasJumpType trans_bl_long(DisasContext *ctx, uint32_t insn,
2984 const DisasInsn *di)
2986 unsigned n = extract32(insn, 1, 1);
2987 target_long disp = assemble_22(insn);
2989 return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
2992 static DisasJumpType trans_blr(DisasContext *ctx, uint32_t insn,
2993 const DisasInsn *di)
2995 unsigned n = extract32(insn, 1, 1);
2996 unsigned rx = extract32(insn, 16, 5);
2997 unsigned link = extract32(insn, 21, 5);
2998 TCGv tmp = get_temp(ctx);
3000 tcg_gen_shli_tl(tmp, load_gpr(ctx, rx), 3);
3001 tcg_gen_addi_tl(tmp, tmp, ctx->iaoq_f + 8);
3002 return do_ibranch(ctx, tmp, link, n);
3005 static DisasJumpType trans_bv(DisasContext *ctx, uint32_t insn,
3006 const DisasInsn *di)
3008 unsigned n = extract32(insn, 1, 1);
3009 unsigned rx = extract32(insn, 16, 5);
3010 unsigned rb = extract32(insn, 21, 5);
3014 dest = load_gpr(ctx, rb);
3016 dest = get_temp(ctx);
3017 tcg_gen_shli_tl(dest, load_gpr(ctx, rx), 3);
3018 tcg_gen_add_tl(dest, dest, load_gpr(ctx, rb));
3020 return do_ibranch(ctx, dest, 0, n);
3023 static DisasJumpType trans_bve(DisasContext *ctx, uint32_t insn,
3024 const DisasInsn *di)
3026 unsigned n = extract32(insn, 1, 1);
3027 unsigned rb = extract32(insn, 21, 5);
3028 unsigned link = extract32(insn, 13, 1) ? 2 : 0;
3030 return do_ibranch(ctx, load_gpr(ctx, rb), link, n);
3033 static const DisasInsn table_branch[] = {
3034 { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
3035 { 0xe800a000u, 0xfc00e000u, trans_bl_long },
3036 { 0xe8004000u, 0xfc00fffdu, trans_blr },
3037 { 0xe800c000u, 0xfc00fffdu, trans_bv },
3038 { 0xe800d000u, 0xfc00dffcu, trans_bve },
3041 static DisasJumpType trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
3042 const DisasInsn *di)
3044 unsigned rt = extract32(insn, 0, 5);
3045 unsigned ra = extract32(insn, 21, 5);
3046 return do_fop_wew(ctx, rt, ra, di->f.wew);
3049 static DisasJumpType trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
3050 const DisasInsn *di)
3052 unsigned rt = assemble_rt64(insn);
3053 unsigned ra = assemble_ra64(insn);
3054 return do_fop_wew(ctx, rt, ra, di->f.wew);
3057 static DisasJumpType trans_fop_ded(DisasContext *ctx, uint32_t insn,
3058 const DisasInsn *di)
3060 unsigned rt = extract32(insn, 0, 5);
3061 unsigned ra = extract32(insn, 21, 5);
3062 return do_fop_ded(ctx, rt, ra, di->f.ded);
3065 static DisasJumpType trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
3066 const DisasInsn *di)
3068 unsigned rt = extract32(insn, 0, 5);
3069 unsigned ra = extract32(insn, 21, 5);
3070 return do_fop_wed(ctx, rt, ra, di->f.wed);
3073 static DisasJumpType trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
3074 const DisasInsn *di)
3076 unsigned rt = assemble_rt64(insn);
3077 unsigned ra = extract32(insn, 21, 5);
3078 return do_fop_wed(ctx, rt, ra, di->f.wed);
3081 static DisasJumpType trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
3082 const DisasInsn *di)
3084 unsigned rt = extract32(insn, 0, 5);
3085 unsigned ra = extract32(insn, 21, 5);
3086 return do_fop_dew(ctx, rt, ra, di->f.dew);
3089 static DisasJumpType trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
3090 const DisasInsn *di)
3092 unsigned rt = extract32(insn, 0, 5);
3093 unsigned ra = assemble_ra64(insn);
3094 return do_fop_dew(ctx, rt, ra, di->f.dew);
3097 static DisasJumpType trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
3098 const DisasInsn *di)
3100 unsigned rt = extract32(insn, 0, 5);
3101 unsigned rb = extract32(insn, 16, 5);
3102 unsigned ra = extract32(insn, 21, 5);
3103 return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3106 static DisasJumpType trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
3107 const DisasInsn *di)
3109 unsigned rt = assemble_rt64(insn);
3110 unsigned rb = assemble_rb64(insn);
3111 unsigned ra = assemble_ra64(insn);
3112 return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3115 static DisasJumpType trans_fop_dedd(DisasContext *ctx, uint32_t insn,
3116 const DisasInsn *di)
3118 unsigned rt = extract32(insn, 0, 5);
3119 unsigned rb = extract32(insn, 16, 5);
3120 unsigned ra = extract32(insn, 21, 5);
3121 return do_fop_dedd(ctx, rt, ra, rb, di->f.dedd);
3124 static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3126 tcg_gen_mov_i32(dst, src);
3129 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3131 tcg_gen_mov_i64(dst, src);
3134 static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3136 tcg_gen_andi_i32(dst, src, INT32_MAX);
3139 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3141 tcg_gen_andi_i64(dst, src, INT64_MAX);
3144 static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3146 tcg_gen_xori_i32(dst, src, INT32_MIN);
3149 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3151 tcg_gen_xori_i64(dst, src, INT64_MIN);
3154 static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3156 tcg_gen_ori_i32(dst, src, INT32_MIN);
3159 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3161 tcg_gen_ori_i64(dst, src, INT64_MIN);
3164 static DisasJumpType do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
3165 unsigned y, unsigned c)
3167 TCGv_i32 ta, tb, tc, ty;
3171 ta = load_frw0_i32(ra);
3172 tb = load_frw0_i32(rb);
3173 ty = tcg_const_i32(y);
3174 tc = tcg_const_i32(c);
3176 gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3178 tcg_temp_free_i32(ta);
3179 tcg_temp_free_i32(tb);
3180 tcg_temp_free_i32(ty);
3181 tcg_temp_free_i32(tc);
3183 return nullify_end(ctx, DISAS_NEXT);
3186 static DisasJumpType trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
3187 const DisasInsn *di)
3189 unsigned c = extract32(insn, 0, 5);
3190 unsigned y = extract32(insn, 13, 3);
3191 unsigned rb = extract32(insn, 16, 5);
3192 unsigned ra = extract32(insn, 21, 5);
3193 return do_fcmp_s(ctx, ra, rb, y, c);
3196 static DisasJumpType trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
3197 const DisasInsn *di)
3199 unsigned c = extract32(insn, 0, 5);
3200 unsigned y = extract32(insn, 13, 3);
3201 unsigned rb = assemble_rb64(insn);
3202 unsigned ra = assemble_ra64(insn);
3203 return do_fcmp_s(ctx, ra, rb, y, c);
3206 static DisasJumpType trans_fcmp_d(DisasContext *ctx, uint32_t insn,
3207 const DisasInsn *di)
3209 unsigned c = extract32(insn, 0, 5);
3210 unsigned y = extract32(insn, 13, 3);
3211 unsigned rb = extract32(insn, 16, 5);
3212 unsigned ra = extract32(insn, 21, 5);
3220 ty = tcg_const_i32(y);
3221 tc = tcg_const_i32(c);
3223 gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3225 tcg_temp_free_i64(ta);
3226 tcg_temp_free_i64(tb);
3227 tcg_temp_free_i32(ty);
3228 tcg_temp_free_i32(tc);
3230 return nullify_end(ctx, DISAS_NEXT);
3233 static DisasJumpType trans_ftest_t(DisasContext *ctx, uint32_t insn,
3234 const DisasInsn *di)
3236 unsigned y = extract32(insn, 13, 3);
3237 unsigned cbit = (y ^ 1) - 1;
3243 tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3244 tcg_gen_extract_tl(t, t, 21 - cbit, 1);
3245 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3248 return nullify_end(ctx, DISAS_NEXT);
3251 static DisasJumpType trans_ftest_q(DisasContext *ctx, uint32_t insn,
3252 const DisasInsn *di)
3254 unsigned c = extract32(insn, 0, 5);
3262 tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3265 case 0: /* simple */
3266 tcg_gen_andi_tl(t, t, 0x4000000);
3267 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3291 return gen_illegal(ctx);
3294 TCGv c = load_const(ctx, mask);
3295 tcg_gen_or_tl(t, t, c);
3296 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3298 tcg_gen_andi_tl(t, t, mask);
3299 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3302 return nullify_end(ctx, DISAS_NEXT);
3305 static DisasJumpType trans_xmpyu(DisasContext *ctx, uint32_t insn,
3306 const DisasInsn *di)
3308 unsigned rt = extract32(insn, 0, 5);
3309 unsigned rb = assemble_rb64(insn);
3310 unsigned ra = assemble_ra64(insn);
3315 a = load_frw0_i64(ra);
3316 b = load_frw0_i64(rb);
3317 tcg_gen_mul_i64(a, a, b);
3319 tcg_temp_free_i64(a);
3320 tcg_temp_free_i64(b);
3322 return nullify_end(ctx, DISAS_NEXT);
3325 #define FOP_DED trans_fop_ded, .f.ded
3326 #define FOP_DEDD trans_fop_dedd, .f.dedd
3328 #define FOP_WEW trans_fop_wew_0c, .f.wew
3329 #define FOP_DEW trans_fop_dew_0c, .f.dew
3330 #define FOP_WED trans_fop_wed_0c, .f.wed
3331 #define FOP_WEWW trans_fop_weww_0c, .f.weww
3333 static const DisasInsn table_float_0c[] = {
3334 /* floating point class zero */
3335 { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
3336 { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
3337 { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
3338 { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
3339 { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
3340 { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },
3342 { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
3343 { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
3344 { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
3345 { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
3346 { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
3347 { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
3349 /* floating point class three */
3350 { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
3351 { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
3352 { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
3353 { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },
3355 { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
3356 { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
3357 { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
3358 { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
3360 /* floating point class one */
3362 { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
3363 { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
3365 { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
3366 { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
3367 { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
3368 { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
3370 { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
3371 { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
3372 { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
3373 { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
3374 /* float/int truncate */
3375 { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
3376 { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
3377 { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
3378 { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
3380 { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
3381 { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
3382 { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
3383 { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
3385 { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
3386 { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
3387 { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
3388 { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
3389 /* float/uint truncate */
3390 { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
3391 { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
3392 { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
3393 { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
3395 /* floating point class two */
3396 { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
3397 { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
3398 { 0x30002420, 0xffffffe0, trans_ftest_q },
3399 { 0x30000420, 0xffff1fff, trans_ftest_t },
3401 /* FID. Note that ra == rt == 0, which via fcpy puts 0 into fr0.
3402 This is machine/revision == 0, which is reserved for simulator. */
3403 { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
3410 #define FOP_WEW trans_fop_wew_0e, .f.wew
3411 #define FOP_DEW trans_fop_dew_0e, .f.dew
3412 #define FOP_WED trans_fop_wed_0e, .f.wed
3413 #define FOP_WEWW trans_fop_weww_0e, .f.weww
3415 static const DisasInsn table_float_0e[] = {
3416 /* floating point class zero */
3417 { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
3418 { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
3419 { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
3420 { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
3421 { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
3422 { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },
3424 { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
3425 { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
3426 { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
3427 { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
3428 { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
3429 { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
3431 /* floating point class three */
3432 { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
3433 { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
3434 { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
3435 { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },
3437 { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
3438 { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
3439 { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
3440 { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
3442 { 0x38004700, 0xfc00ef60, trans_xmpyu },
3444 /* floating point class one */
3446 { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
3447 { 0x38002200, 0xfc1fffc0, FOP_DEW = gen_helper_fcnv_s_d },
3449 { 0x38008200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_w_s },
3450 { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
3451 { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
3452 { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
3454 { 0x38010200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_w },
3455 { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
3456 { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
3457 { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
3458 /* float/int truncate */
3459 { 0x38018200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_w },
3460 { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
3461 { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
3462 { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
3464 { 0x38028200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_uw_s },
3465 { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
3466 { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
3467 { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
3469 { 0x38030200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_uw },
3470 { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
3471 { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
3472 { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
3473 /* float/uint truncate */
3474 { 0x38038200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_uw },
3475 { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
3476 { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
3477 { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
3479 /* floating point class two */
3480 { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
3481 { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
3491 /* Convert the fmpyadd single-precision register encodings to standard. */
3492 static inline int fmpyadd_s_reg(unsigned r)
3494 return (r & 16) * 2 + 16 + (r & 15);
3497 static DisasJumpType trans_fmpyadd(DisasContext *ctx,
3498 uint32_t insn, bool is_sub)
3500 unsigned tm = extract32(insn, 0, 5);
3501 unsigned f = extract32(insn, 5, 1);
3502 unsigned ra = extract32(insn, 6, 5);
3503 unsigned ta = extract32(insn, 11, 5);
3504 unsigned rm2 = extract32(insn, 16, 5);
3505 unsigned rm1 = extract32(insn, 21, 5);
3509 /* Independent multiply & add/sub, with undefined behaviour
3510 if outputs overlap inputs. */
3512 tm = fmpyadd_s_reg(tm);
3513 ra = fmpyadd_s_reg(ra);
3514 ta = fmpyadd_s_reg(ta);
3515 rm2 = fmpyadd_s_reg(rm2);
3516 rm1 = fmpyadd_s_reg(rm1);
3517 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3518 do_fop_weww(ctx, ta, ta, ra,
3519 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
3521 do_fop_dedd(ctx, tm, rm1, rm2, gen_helper_fmpy_d);
3522 do_fop_dedd(ctx, ta, ta, ra,
3523 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
3526 return nullify_end(ctx, DISAS_NEXT);
3529 static DisasJumpType trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
3530 const DisasInsn *di)
3532 unsigned rt = assemble_rt64(insn);
3533 unsigned neg = extract32(insn, 5, 1);
3534 unsigned rm1 = assemble_ra64(insn);
3535 unsigned rm2 = assemble_rb64(insn);
3536 unsigned ra3 = assemble_rc64(insn);
3540 a = load_frw0_i32(rm1);
3541 b = load_frw0_i32(rm2);
3542 c = load_frw0_i32(ra3);
3545 gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
3547 gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
3550 tcg_temp_free_i32(b);
3551 tcg_temp_free_i32(c);
3552 save_frw_i32(rt, a);
3553 tcg_temp_free_i32(a);
3554 return nullify_end(ctx, DISAS_NEXT);
3557 static DisasJumpType trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
3558 const DisasInsn *di)
3560 unsigned rt = extract32(insn, 0, 5);
3561 unsigned neg = extract32(insn, 5, 1);
3562 unsigned rm1 = extract32(insn, 21, 5);
3563 unsigned rm2 = extract32(insn, 16, 5);
3564 unsigned ra3 = assemble_rc64(insn);
3573 gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
3575 gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
3578 tcg_temp_free_i64(b);
3579 tcg_temp_free_i64(c);
3581 tcg_temp_free_i64(a);
3582 return nullify_end(ctx, DISAS_NEXT);
3585 static const DisasInsn table_fp_fused[] = {
3586 { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
3587 { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }
3590 static DisasJumpType translate_table_int(DisasContext *ctx, uint32_t insn,
3591 const DisasInsn table[], size_t n)
3594 for (i = 0; i < n; ++i) {
3595 if ((insn & table[i].mask) == table[i].insn) {
3596 return table[i].trans(ctx, insn, &table[i]);
3599 return gen_illegal(ctx);
3602 #define translate_table(ctx, insn, table) \
3603 translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
3605 static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
3607 uint32_t opc = extract32(insn, 26, 6);
3610 case 0x00: /* system op */
3611 return translate_table(ctx, insn, table_system);
3613 return translate_table(ctx, insn, table_mem_mgmt);
3615 return translate_table(ctx, insn, table_arith_log);
3617 return translate_table(ctx, insn, table_index_mem);
3619 return trans_fmpyadd(ctx, insn, false);
3621 return trans_ldil(ctx, insn);
3623 return trans_copr_w(ctx, insn);
3625 return trans_addil(ctx, insn);
3627 return trans_copr_dw(ctx, insn);
3629 return translate_table(ctx, insn, table_float_0c);
3631 return trans_ldo(ctx, insn);
3633 return translate_table(ctx, insn, table_float_0e);
3636 return trans_load(ctx, insn, false, MO_UB);
3638 return trans_load(ctx, insn, false, MO_TEUW);
3640 return trans_load(ctx, insn, false, MO_TEUL);
3642 return trans_load(ctx, insn, true, MO_TEUL);
3644 return trans_fload_mod(ctx, insn);
3646 return trans_load_w(ctx, insn);
3648 return trans_store(ctx, insn, false, MO_UB);
3650 return trans_store(ctx, insn, false, MO_TEUW);
3652 return trans_store(ctx, insn, false, MO_TEUL);
3654 return trans_store(ctx, insn, true, MO_TEUL);
3656 return trans_fstore_mod(ctx, insn);
3658 return trans_store_w(ctx, insn);
3661 return trans_cmpb(ctx, insn, true, false, false);
3663 return trans_cmpb(ctx, insn, true, true, false);
3665 return trans_cmpb(ctx, insn, false, false, false);
3667 return trans_cmpb(ctx, insn, false, true, false);
3669 return trans_cmpiclr(ctx, insn);
3671 return trans_subi(ctx, insn);
3673 return trans_fmpyadd(ctx, insn, true);
3675 return trans_cmpb(ctx, insn, true, false, true);
3677 return trans_addb(ctx, insn, true, false);
3679 return trans_addb(ctx, insn, true, true);
3681 return trans_addb(ctx, insn, false, false);
3683 return trans_addb(ctx, insn, false, true);
3686 return trans_addi(ctx, insn);
3688 return translate_table(ctx, insn, table_fp_fused);
3690 return trans_cmpb(ctx, insn, false, false, true);
3694 return trans_bb(ctx, insn);
3696 return trans_movb(ctx, insn, false);
3698 return trans_movb(ctx, insn, true);
3700 return translate_table(ctx, insn, table_sh_ex);
3702 return translate_table(ctx, insn, table_depw);
3704 return trans_be(ctx, insn, false);
3706 return trans_be(ctx, insn, true);
3708 return translate_table(ctx, insn, table_branch);
3710 case 0x04: /* spopn */
3711 case 0x05: /* diag */
3712 case 0x0F: /* product specific */
3715 case 0x07: /* unassigned */
3716 case 0x15: /* unassigned */
3717 case 0x1D: /* unassigned */
3718 case 0x37: /* unassigned */
3719 case 0x3F: /* unassigned */
3723 return gen_illegal(ctx);
3726 static int hppa_tr_init_disas_context(DisasContextBase *dcbase,
3727 CPUState *cs, int max_insns)
3729 DisasContext *ctx = container_of(dcbase, DisasContext, base);
3730 TranslationBlock *tb = ctx->base.tb;
3734 ctx->iaoq_f = tb->pc;
3735 ctx->iaoq_b = tb->cs_base;
3737 TCGV_UNUSED(ctx->iaoq_n_var);
3740 for (i = 0; i < ARRAY_SIZE(ctx->temps); ++i) {
3741 TCGV_UNUSED(ctx->temps[i]);
3744 bound = -(tb->pc | TARGET_PAGE_MASK) / 4;
3745 return MIN(max_insns, bound);
3748 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
3750 DisasContext *ctx = container_of(dcbase, DisasContext, base);
3752 /* Seed the nullification status from PSW[N], as shown in TB->FLAGS. */
3753 ctx->null_cond = cond_make_f();
3754 ctx->psw_n_nonzero = false;
3755 if (ctx->base.tb->flags & 1) {
3756 ctx->null_cond.c = TCG_COND_ALWAYS;
3757 ctx->psw_n_nonzero = true;
3759 ctx->null_lab = NULL;
3762 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
3764 DisasContext *ctx = container_of(dcbase, DisasContext, base);
3766 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
3769 static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
3770 const CPUBreakpoint *bp)
3772 DisasContext *ctx = container_of(dcbase, DisasContext, base);
3774 ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG);
3775 ctx->base.pc_next = ctx->iaoq_f + 4;
3779 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
3781 DisasContext *ctx = container_of(dcbase, DisasContext, base);
3782 CPUHPPAState *env = cs->env_ptr;
3786 /* Execute one insn. */
3787 if (ctx->iaoq_f < TARGET_PAGE_SIZE) {
3788 ret = do_page_zero(ctx);
3789 assert(ret != DISAS_NEXT);
3791 /* Always fetch the insn, even if nullified, so that we check
3792 the page permissions for execute. */
3793 uint32_t insn = cpu_ldl_code(env, ctx->iaoq_f);
3795 /* Set up the IA queue for the next insn.
3796 This will be overwritten by a branch. */
3797 if (ctx->iaoq_b == -1) {
3799 ctx->iaoq_n_var = get_temp(ctx);
3800 tcg_gen_addi_tl(ctx->iaoq_n_var, cpu_iaoq_b, 4);
3802 ctx->iaoq_n = ctx->iaoq_b + 4;
3803 TCGV_UNUSED(ctx->iaoq_n_var);
3806 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
3807 ctx->null_cond.c = TCG_COND_NEVER;
3810 ret = translate_one(ctx, insn);
3811 assert(ctx->null_lab == NULL);
3815 /* Free any temporaries allocated. */
3816 for (i = 0, n = ctx->ntemps; i < n; ++i) {
3817 tcg_temp_free(ctx->temps[i]);
3818 TCGV_UNUSED(ctx->temps[i]);
3822 /* Advance the insn queue. */
3823 /* ??? The non-linear instruction restriction is purely due to
3824 the debugging dump. Otherwise we *could* follow unconditional
3825 branches within the same page. */
3826 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
3827 if (ctx->null_cond.c == TCG_COND_NEVER
3828 || ctx->null_cond.c == TCG_COND_ALWAYS) {
3829 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
3830 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
3831 ret = DISAS_NORETURN;
3833 ret = DISAS_IAQ_N_STALE;
3836 ctx->iaoq_f = ctx->iaoq_b;
3837 ctx->iaoq_b = ctx->iaoq_n;
3838 ctx->base.is_jmp = ret;
3840 if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
3843 if (ctx->iaoq_f == -1) {
3844 tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
3845 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
3847 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
3848 } else if (ctx->iaoq_b == -1) {
3849 tcg_gen_mov_tl(cpu_iaoq_b, ctx->iaoq_n_var);
3853 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
3855 DisasContext *ctx = container_of(dcbase, DisasContext, base);
3857 switch (ctx->base.is_jmp) {
3858 case DISAS_NORETURN:
3860 case DISAS_TOO_MANY:
3861 case DISAS_IAQ_N_STALE:
3862 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
3863 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
3866 case DISAS_IAQ_N_UPDATED:
3867 if (ctx->base.singlestep_enabled) {
3868 gen_excp_1(EXCP_DEBUG);
3870 tcg_gen_lookup_and_goto_ptr();
3874 g_assert_not_reached();
3877 /* We don't actually use this during normal translation,
3878 but we should interact with the generic main loop. */
3879 ctx->base.pc_next = ctx->base.tb->pc + 4 * ctx->base.num_insns;
3882 static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
3884 TranslationBlock *tb = dcbase->tb;
3888 qemu_log("IN:\n0x00000000: (null)\n");
3891 qemu_log("IN:\n0x000000b0: light-weight-syscall\n");
3894 qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n");
3897 qemu_log("IN:\n0x00000100: syscall\n");
3900 qemu_log("IN: %s\n", lookup_symbol(tb->pc));
3901 log_target_disas(cs, tb->pc, tb->size, 1);
3906 static const TranslatorOps hppa_tr_ops = {
3907 .init_disas_context = hppa_tr_init_disas_context,
3908 .tb_start = hppa_tr_tb_start,
3909 .insn_start = hppa_tr_insn_start,
3910 .breakpoint_check = hppa_tr_breakpoint_check,
3911 .translate_insn = hppa_tr_translate_insn,
3912 .tb_stop = hppa_tr_tb_stop,
3913 .disas_log = hppa_tr_disas_log,
3916 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
3920 translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
3923 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
3926 env->iaoq_f = data[0];
3927 if (data[1] != -1) {
3928 env->iaoq_b = data[1];
3930 /* Since we were executing the instruction at IAOQ_F, and took some
3931 sort of action that provoked the cpu_restore_state, we can infer
3932 that the instruction was not nullified. */