/*
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu-common.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define LOG_DIS(str, ...) \
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%08x: " str, dc->base.pc_next, \
                  ## __VA_ARGS__)

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
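
/* In tb_stop below: DISAS_TB_JUMP blocks have already been chained
   statically via goto_tb, while DISAS_JUMP and DISAS_UPDATE must leave
   via exit_tb(0) so that the dynamically written pc (or changed CPU
   state) forces a fresh TB lookup.  */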

typedef struct DisasContext {
    DisasContextBase base;
    uint32_t mem_idx;
    uint32_t tb_flags;
    uint32_t delayed_branch;
} DisasContext;

static TCGv cpu_sr;
static TCGv cpu_R[32];
static TCGv cpu_R0;
static TCGv cpu_pc;
static TCGv jmp_pc;             /* l.jr/l.jalr temp pc */
static TCGv cpu_ppc;
static TCGv cpu_sr_f;           /* bf/bnf, F flag taken */
static TCGv cpu_sr_cy;          /* carry (unsigned overflow) */
static TCGv cpu_sr_ov;          /* signed overflow */
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;
static TCGv_i32 fpcsr;
static TCGv_i64 cpu_mac;        /* MACHI:MACLO */
static TCGv_i32 cpu_dflag;

#include "exec/gen-icount.h"

void openrisc_translate_init(void)
{
    static const char * const regnames[] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    };
    int i;

    cpu_sr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, sr), "sr");
    cpu_dflag = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUOpenRISCState, dflag),
                                       "dflag");
    cpu_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, pc), "pc");
    cpu_ppc = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUOpenRISCState, ppc), "ppc");
    jmp_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
    cpu_sr_f = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUOpenRISCState, sr_f), "sr_f");
    cpu_sr_cy = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_cy), "sr_cy");
    cpu_sr_ov = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_ov), "sr_ov");
    cpu_lock_addr = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUOpenRISCState, lock_addr),
                                       "lock_addr");
    cpu_lock_value = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUOpenRISCState, lock_value),
                                        "lock_value");
    fpcsr = tcg_global_mem_new_i32(cpu_env,
                                   offsetof(CPUOpenRISCState, fpcsr),
                                   "fpcsr");
    cpu_mac = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUOpenRISCState, mac),
                                     "mac");
    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUOpenRISCState,
                                               shadow_gpr[0][i]),
                                      regnames[i]);
    }
    cpu_R0 = cpu_R[0];
}

static void gen_exception(DisasContext *dc, unsigned int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_illegal_exception(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_ILLEGAL);
    dc->base.is_jmp = DISAS_NORETURN;
}

/* Not used yet; enable when we add or64 support.  */
/*#ifdef TARGET_OPENRISC64
static void check_ob64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OB64S)) {
        gen_illegal_exception(dc);
    }
}

static void check_of64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OF64S)) {
        gen_illegal_exception(dc);
    }
}

static void check_ov64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OV64S)) {
        gen_illegal_exception(dc);
    }
}
#endif*/

/* We're about to write to REG.  On the off-chance that the user is
   writing to R0, re-instate the architectural register.  */
#define check_r0_write(reg)             \
    do {                                \
        if (unlikely(reg == 0)) {       \
            cpu_R[0] = cpu_R0;          \
        }                               \
    } while (0)

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
    if (unlikely(dc->base.singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (dc->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
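
/* Direct block chaining is only safe while not single-stepping and, in
   system mode, while the destination lies in the same guest page as the
   current TB, since a remap of another page must force a fresh lookup.
   User-mode mappings never change under the translator, so there any
   destination is acceptable.  */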

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_goto_tb(n);
        tcg_gen_exit_tb((uintptr_t)dc->base.tb + n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (dc->base.singlestep_enabled) {
            gen_exception(dc, EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
    }
}

static void gen_jump(DisasContext *dc, int32_t n26, uint32_t reg, uint32_t op0)
{
    target_ulong tmp_pc = dc->base.pc_next + n26 * 4;

    switch (op0) {
    case 0x00: /* l.j */
        tcg_gen_movi_tl(jmp_pc, tmp_pc);
        break;
    case 0x01: /* l.jal */
        tcg_gen_movi_tl(cpu_R[9], dc->base.pc_next + 8);
        /* Optimize jal being used to load the PC for PIC.  */
        if (tmp_pc == dc->base.pc_next + 8) {
            return;
        }
        tcg_gen_movi_tl(jmp_pc, tmp_pc);
        break;
    case 0x03: /* l.bnf */
    case 0x04: /* l.bf */
        {
            TCGv t_next = tcg_const_tl(dc->base.pc_next + 8);
            TCGv t_true = tcg_const_tl(tmp_pc);
            TCGv t_zero = tcg_const_tl(0);

            tcg_gen_movcond_tl(op0 == 0x03 ? TCG_COND_EQ : TCG_COND_NE,
                               jmp_pc, cpu_sr_f, t_zero, t_true, t_next);

            tcg_temp_free(t_next);
            tcg_temp_free(t_true);
            tcg_temp_free(t_zero);
        }
        break;
    case 0x11: /* l.jr */
        tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
        break;
    case 0x12: /* l.jalr */
        tcg_gen_movi_tl(cpu_R[9], (dc->base.pc_next + 8));
        tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
        break;
    default:
        gen_illegal_exception(dc);
        break;
    }

    dc->delayed_branch = 2;
}
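
/* delayed_branch counts down in openrisc_tr_translate_insn below: it is
   2 for the branch instruction itself, 1 while the delay slot is being
   translated, and the branch is actually performed (cpu_pc <- jmp_pc)
   when it reaches 0.  */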

static void gen_ove_cy(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cy(cpu_env);
    }
}

static void gen_ove_ov(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_ov(cpu_env);
    }
}

static void gen_ove_cyov(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cyov(cpu_env);
    }
}

static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, srcb, t0);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}
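
/* The andc above leaves ov = (res ^ srcb) & ~(srca ^ srcb), which is
   negative exactly when the two addends agree in sign but the result
   does not -- the standard two's-complement overflow condition.  */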

static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, cpu_sr_cy, t0);
    tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, t0);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}

static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv res = tcg_temp_new();

    tcg_gen_sub_tl(res, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_cy, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_ov, res, srca);
    tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy);
    tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}
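
/* For subtraction the roles invert: carry is the unsigned borrow
   (srca < srcb), and ov = (res ^ srca) & (srca ^ srcb) is negative only
   when the operands differ in sign and the result's sign moved away
   from the minuend -- note the xor against srca, not srcb.  */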

static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb);
    tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
    tcg_temp_free(t0);
    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);

    gen_ove_ov(dc);
}

static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    tcg_gen_mulu2_tl(dest, cpu_sr_cy, srca, srcb);
    tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0);

    gen_ove_cy(dc);
}
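
/* l.mulu is an unsigned multiply, so the high half must come from
   mulu2 (muls2 would compute the sign-extended high part); carry is set
   iff that high half of the double-width product is nonzero.  */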

static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_ov);
    tcg_gen_div_tl(dest, srca, t0);
    tcg_temp_free(t0);

    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);

    gen_ove_ov(dc);
}
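
/* The OR folds the zero test back into the divisor: when srcb == 0 the
   setcond left 1 in the flag, so the host divides by 1 instead of
   faulting, while the guest still sees the overflow bit set.  The same
   trick is used for l.divu below.  */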

static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_cy);
    tcg_gen_divu_tl(dest, srca, t0);
    tcg_temp_free(t0);

    gen_ove_cy(dc);
}

static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_ov, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_muls2_i64(cpu_mac, high, t1, t2);
        tcg_gen_sari_i64(t1, cpu_mac, 63);
        tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high);
        tcg_temp_free_i64(high);
        tcg_gen_trunc_i64_tl(cpu_sr_ov, t1);
        tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);

        gen_ove_ov(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_cy, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_mulu2_i64(cpu_mac, high, t1, t2);
        tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0);
        tcg_gen_trunc_i64_tl(cpu_sr_cy, high);
        tcg_temp_free_i64(high);

        gen_ove_cy(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
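
/* On a 32-bit target the 64-bit MAC accumulator holds any 32x32 product
   exactly, so l.muld/l.muldu can never overflow there; only the (not
   yet enabled) 64-bit target needs the explicit check on the high half
   of the 128-bit product.  */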

static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_andc_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}
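
/* The xors bracket the accumulate: t2 records (old_mac ^ product)
   before the add, t1 records (product ^ new_mac) after it, and
   andc(t1, t2) is negative only when the addends agreed in sign but
   the sum does not -- signed overflow of the accumulator.  */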

static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t1);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}

static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_andc_i64(t1, t2, t1);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}

static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t2);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}

static void gen_lwa(DisasContext *dc, TCGv rd, TCGv ra, int32_t ofs)
{
    TCGv ea = tcg_temp_new();

    tcg_gen_addi_tl(ea, ra, ofs);
    tcg_gen_qemu_ld_tl(rd, ea, dc->mem_idx, MO_TEUL);
    tcg_gen_mov_tl(cpu_lock_addr, ea);
    tcg_gen_mov_tl(cpu_lock_value, rd);
    tcg_temp_free(ea);
}

static void gen_swa(DisasContext *dc, int b, TCGv ra, int32_t ofs)
{
    TCGv ea, val;
    TCGLabel *lab_fail, *lab_done;

    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, ra, ofs);

    /* For TB_FLAGS_R0_0, the branch below invalidates the temporary assigned
       to cpu_R[0].  Since l.swa is quite often immediately followed by a
       branch, don't bother reallocating; finish the TB using the "real" R0.
       This also takes care of RB input across the branch.  */
    cpu_R[0] = cpu_R0;

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
    tcg_temp_free(ea);

    val = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
                              cpu_R[b], dc->mem_idx, MO_TEUL);
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
    tcg_temp_free(val);

    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    tcg_gen_movi_tl(cpu_sr_f, 0);

    gen_set_label(lab_done);
    tcg_gen_movi_tl(cpu_lock_addr, -1);
}
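
/* Together, gen_lwa/gen_swa emulate load-linked/store-conditional on
   top of a compare-and-swap: the store succeeds (F set) only if the
   word at the reserved address still holds the value l.lwa observed.
   Writing -1 to cpu_lock_addr afterwards kills the reservation either
   way, since a real lwa address is word-aligned and can never be -1.  */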

static void dec_calc(DisasContext *dc, uint32_t insn)
{
    uint32_t op0, op1, op2;
    uint32_t ra, rb, rd;
    op0 = extract32(insn, 0, 4);
    op1 = extract32(insn, 8, 2);
    op2 = extract32(insn, 6, 2);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);
    rd = extract32(insn, 21, 5);

    switch (op1) {
    case 0:
        switch (op0) {
        case 0x0: /* l.add */
            LOG_DIS("l.add r%d, r%d, r%d\n", rd, ra, rb);
            gen_add(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x1: /* l.addc */
            LOG_DIS("l.addc r%d, r%d, r%d\n", rd, ra, rb);
            gen_addc(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x2: /* l.sub */
            LOG_DIS("l.sub r%d, r%d, r%d\n", rd, ra, rb);
            gen_sub(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x3: /* l.and */
            LOG_DIS("l.and r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_and_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x4: /* l.or */
            LOG_DIS("l.or r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_or_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x5: /* l.xor */
            LOG_DIS("l.xor r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_xor_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x8:
            switch (op2) {
            case 0: /* l.sll */
                LOG_DIS("l.sll r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_shl_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            case 1: /* l.srl */
                LOG_DIS("l.srl r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_shr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            case 2: /* l.sra */
                LOG_DIS("l.sra r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_sar_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            case 3: /* l.ror */
                LOG_DIS("l.ror r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_rotr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            }
            break;

        case 0xc:
            switch (op2) {
            case 0: /* l.exths */
                LOG_DIS("l.exths r%d, r%d\n", rd, ra);
                tcg_gen_ext16s_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 1: /* l.extbs */
                LOG_DIS("l.extbs r%d, r%d\n", rd, ra);
                tcg_gen_ext8s_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 2: /* l.exthz */
                LOG_DIS("l.exthz r%d, r%d\n", rd, ra);
                tcg_gen_ext16u_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 3: /* l.extbz */
                LOG_DIS("l.extbz r%d, r%d\n", rd, ra);
                tcg_gen_ext8u_tl(cpu_R[rd], cpu_R[ra]);
                return;
            }
            break;

        case 0xd:
            switch (op2) {
            case 0: /* l.extws */
                LOG_DIS("l.extws r%d, r%d\n", rd, ra);
                tcg_gen_ext32s_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 1: /* l.extwz */
                LOG_DIS("l.extwz r%d, r%d\n", rd, ra);
                tcg_gen_ext32u_tl(cpu_R[rd], cpu_R[ra]);
                return;
            }
            break;

        case 0xe: /* l.cmov */
            LOG_DIS("l.cmov r%d, r%d, r%d\n", rd, ra, rb);
            {
                TCGv zero = tcg_const_tl(0);
                tcg_gen_movcond_tl(TCG_COND_NE, cpu_R[rd], cpu_sr_f, zero,
                                   cpu_R[ra], cpu_R[rb]);
                tcg_temp_free(zero);
            }
            return;

        case 0xf: /* l.ff1 */
            LOG_DIS("l.ff1 r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_ctzi_tl(cpu_R[rd], cpu_R[ra], -1);
            tcg_gen_addi_tl(cpu_R[rd], cpu_R[rd], 1);
            return;
        }
        break;

    case 1:
        switch (op0) {
        case 0xf: /* l.fl1 */
            LOG_DIS("l.fl1 r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_clzi_tl(cpu_R[rd], cpu_R[ra], TARGET_LONG_BITS);
            tcg_gen_subfi_tl(cpu_R[rd], TARGET_LONG_BITS, cpu_R[rd]);
            return;
        }
        break;
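
        /* l.ff1 is "find first 1" counting from 1 at the LSB: ctz + 1,
           with the ctzi(-1) sentinel folding ff1(0) to 0.  l.fl1 counts
           from the MSB side: TARGET_LONG_BITS - clz, where clzi's
           TARGET_LONG_BITS sentinel likewise yields fl1(0) == 0.  */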

    case 3:
        switch (op0) {
        case 0x6: /* l.mul */
            LOG_DIS("l.mul r%d, r%d, r%d\n", rd, ra, rb);
            gen_mul(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x7: /* l.muld */
            LOG_DIS("l.muld r%d, r%d\n", ra, rb);
            gen_muld(dc, cpu_R[ra], cpu_R[rb]);
            return;

        case 0x9: /* l.div */
            LOG_DIS("l.div r%d, r%d, r%d\n", rd, ra, rb);
            gen_div(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0xa: /* l.divu */
            LOG_DIS("l.divu r%d, r%d, r%d\n", rd, ra, rb);
            gen_divu(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0xb: /* l.mulu */
            LOG_DIS("l.mulu r%d, r%d, r%d\n", rd, ra, rb);
            gen_mulu(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0xc: /* l.muldu */
            LOG_DIS("l.muldu r%d, r%d\n", ra, rb);
            gen_muldu(dc, cpu_R[ra], cpu_R[rb]);
            return;
        }
        break;
    }

    gen_illegal_exception(dc);
}

static void dec_misc(DisasContext *dc, uint32_t insn)
{
    uint32_t op0, op1;
    uint32_t ra, rb, rd;
    uint32_t L6, K5, K16, K5_11;
    int32_t I16, I5_11, N26;
    TCGMemOp mop;
    TCGv t0;

    op0 = extract32(insn, 26, 6);
    op1 = extract32(insn, 24, 2);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);
    rd = extract32(insn, 21, 5);
    L6 = extract32(insn, 5, 6);
    K5 = extract32(insn, 0, 5);
    K16 = extract32(insn, 0, 16);
    I16 = sextract32(insn, 0, 16);
    N26 = sextract32(insn, 0, 26);
    K5_11 = (extract32(insn, 21, 5) << 11) | extract32(insn, 0, 11);
    I5_11 = (int16_t)K5_11;

    switch (op0) {
    case 0x00: /* l.j */
        LOG_DIS("l.j %d\n", N26);
        gen_jump(dc, N26, 0, op0);
        break;

    case 0x01: /* l.jal */
        LOG_DIS("l.jal %d\n", N26);
        gen_jump(dc, N26, 0, op0);
        break;

    case 0x03: /* l.bnf */
        LOG_DIS("l.bnf %d\n", N26);
        gen_jump(dc, N26, 0, op0);
        break;

    case 0x04: /* l.bf */
        LOG_DIS("l.bf %d\n", N26);
        gen_jump(dc, N26, 0, op0);
        break;

    case 0x05:
        switch (op1) {
        case 0x01: /* l.nop */
            LOG_DIS("l.nop %d\n", I16);
            break;
        default:
            gen_illegal_exception(dc);
            break;
        }
        break;

    case 0x11: /* l.jr */
        LOG_DIS("l.jr r%d\n", rb);
        gen_jump(dc, 0, rb, op0);
        break;

    case 0x12: /* l.jalr */
        LOG_DIS("l.jalr r%d\n", rb);
        gen_jump(dc, 0, rb, op0);
        break;

    case 0x13: /* l.maci */
        LOG_DIS("l.maci r%d, %d\n", ra, I16);
        t0 = tcg_const_tl(I16);
        gen_mac(dc, cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x09: /* l.rfe */
        LOG_DIS("l.rfe\n");
        {
#if defined(CONFIG_USER_ONLY)
            gen_illegal_exception(dc);
#else
            if (dc->mem_idx == MMU_USER_IDX) {
                gen_illegal_exception(dc);
                return;
            }
            gen_helper_rfe(cpu_env);
            dc->base.is_jmp = DISAS_UPDATE;
#endif
        }
        break;

    case 0x1b: /* l.lwa */
        LOG_DIS("l.lwa r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        gen_lwa(dc, cpu_R[rd], cpu_R[ra], I16);
        break;

    case 0x1c: /* l.cust1 */
        LOG_DIS("l.cust1\n");
        break;

    case 0x1d: /* l.cust2 */
        LOG_DIS("l.cust2\n");
        break;

    case 0x1e: /* l.cust3 */
        LOG_DIS("l.cust3\n");
        break;

    case 0x1f: /* l.cust4 */
        LOG_DIS("l.cust4\n");
        break;

    case 0x3c: /* l.cust5 */
        LOG_DIS("l.cust5 r%d, r%d, r%d, %d, %d\n", rd, ra, rb, L6, K5);
        break;

    case 0x3d: /* l.cust6 */
        LOG_DIS("l.cust6\n");
        break;

    case 0x3e: /* l.cust7 */
        LOG_DIS("l.cust7\n");
        break;

    case 0x3f: /* l.cust8 */
        LOG_DIS("l.cust8\n");
        break;

    /* Not used yet; enable when we add or64 support.  */
    /*#ifdef TARGET_OPENRISC64
    case 0x20:     l.ld
        LOG_DIS("l.ld r%d, r%d, %d\n", rd, ra, I16);
        check_ob64s(dc);
        mop = MO_TEQ;
        goto do_load;
    #endif*/

    case 0x21: /* l.lwz */
        LOG_DIS("l.lwz r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TEUL;
        goto do_load;

    case 0x22: /* l.lws */
        LOG_DIS("l.lws r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TESL;
        goto do_load;

    case 0x23: /* l.lbz */
        LOG_DIS("l.lbz r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_UB;
        goto do_load;

    case 0x24: /* l.lbs */
        LOG_DIS("l.lbs r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_SB;
        goto do_load;

    case 0x25: /* l.lhz */
        LOG_DIS("l.lhz r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TEUW;
        goto do_load;

    case 0x26: /* l.lhs */
        LOG_DIS("l.lhs r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TESW;
        goto do_load;

    do_load:
        check_r0_write(rd);
        t0 = tcg_temp_new();
        tcg_gen_addi_tl(t0, cpu_R[ra], I16);
        tcg_gen_qemu_ld_tl(cpu_R[rd], t0, dc->mem_idx, mop);
        tcg_temp_free(t0);
        break;
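
        /* All the load forms funnel through do_load above: each case
           only selects the MemOp, and tcg_gen_qemu_ld_tl applies the
           width, sign-extension, and guest endianness it encodes.  */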

    case 0x27: /* l.addi */
        LOG_DIS("l.addi r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        t0 = tcg_const_tl(I16);
        gen_add(dc, cpu_R[rd], cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x28: /* l.addic */
        LOG_DIS("l.addic r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        t0 = tcg_const_tl(I16);
        gen_addc(dc, cpu_R[rd], cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x29: /* l.andi */
        LOG_DIS("l.andi r%d, r%d, %d\n", rd, ra, K16);
        check_r0_write(rd);
        tcg_gen_andi_tl(cpu_R[rd], cpu_R[ra], K16);
        break;

    case 0x2a: /* l.ori */
        LOG_DIS("l.ori r%d, r%d, %d\n", rd, ra, K16);
        check_r0_write(rd);
        tcg_gen_ori_tl(cpu_R[rd], cpu_R[ra], K16);
        break;

    case 0x2b: /* l.xori */
        LOG_DIS("l.xori r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        tcg_gen_xori_tl(cpu_R[rd], cpu_R[ra], I16);
        break;

    case 0x2c: /* l.muli */
        LOG_DIS("l.muli r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        t0 = tcg_const_tl(I16);
        gen_mul(dc, cpu_R[rd], cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x2d: /* l.mfspr */
        LOG_DIS("l.mfspr r%d, r%d, %d\n", rd, ra, K16);
        check_r0_write(rd);
        {
#if defined(CONFIG_USER_ONLY)
            gen_illegal_exception(dc);
#else
            TCGv_i32 ti = tcg_const_i32(K16);
            if (dc->mem_idx == MMU_USER_IDX) {
                gen_illegal_exception(dc);
                return;
            }
            gen_helper_mfspr(cpu_R[rd], cpu_env, cpu_R[rd], cpu_R[ra], ti);
            tcg_temp_free_i32(ti);
#endif
        }
        break;

    case 0x30: /* l.mtspr */
        LOG_DIS("l.mtspr r%d, r%d, %d\n", ra, rb, K5_11);
        {
#if defined(CONFIG_USER_ONLY)
            gen_illegal_exception(dc);
#else
            TCGv_i32 im = tcg_const_i32(K5_11);
            if (dc->mem_idx == MMU_USER_IDX) {
                gen_illegal_exception(dc);
                return;
            }
            gen_helper_mtspr(cpu_env, cpu_R[ra], cpu_R[rb], im);
            tcg_temp_free_i32(im);
#endif
        }
        break;

    case 0x33: /* l.swa */
        LOG_DIS("l.swa r%d, r%d, %d\n", ra, rb, I5_11);
        gen_swa(dc, rb, cpu_R[ra], I5_11);
        break;

    /* Not used yet; enable when we add or64 support.  */
    /*#ifdef TARGET_OPENRISC64
    case 0x34:     l.sd
        LOG_DIS("l.sd r%d, r%d, %d\n", ra, rb, I5_11);
        check_ob64s(dc);
        mop = MO_TEQ;
        goto do_store;
    #endif*/

    case 0x35: /* l.sw */
        LOG_DIS("l.sw r%d, r%d, %d\n", ra, rb, I5_11);
        mop = MO_TEUL;
        goto do_store;

    case 0x36: /* l.sb */
        LOG_DIS("l.sb r%d, r%d, %d\n", ra, rb, I5_11);
        mop = MO_UB;
        goto do_store;

    case 0x37: /* l.sh */
        LOG_DIS("l.sh r%d, r%d, %d\n", ra, rb, I5_11);
        mop = MO_TEUW;
        goto do_store;

    do_store:
        {
            TCGv t0 = tcg_temp_new();
            tcg_gen_addi_tl(t0, cpu_R[ra], I5_11);
            tcg_gen_qemu_st_tl(cpu_R[rb], t0, dc->mem_idx, mop);
            tcg_temp_free(t0);
        }
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_mac(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t ra, rb;
    op0 = extract32(insn, 0, 4);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);

    switch (op0) {
    case 0x0001: /* l.mac */
        LOG_DIS("l.mac r%d, r%d\n", ra, rb);
        gen_mac(dc, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0002: /* l.msb */
        LOG_DIS("l.msb r%d, r%d\n", ra, rb);
        gen_msb(dc, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0003: /* l.macu */
        LOG_DIS("l.macu r%d, r%d\n", ra, rb);
        gen_macu(dc, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0004: /* l.msbu */
        LOG_DIS("l.msbu r%d, r%d\n", ra, rb);
        gen_msbu(dc, cpu_R[ra], cpu_R[rb]);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_logic(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t rd, ra, L6, S6;
    op0 = extract32(insn, 6, 2);
    rd = extract32(insn, 21, 5);
    ra = extract32(insn, 16, 5);
    L6 = extract32(insn, 0, 6);
    S6 = L6 & (TARGET_LONG_BITS - 1);

    check_r0_write(rd);
    switch (op0) {
    case 0x00: /* l.slli */
        LOG_DIS("l.slli r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_shli_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    case 0x01: /* l.srli */
        LOG_DIS("l.srli r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_shri_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    case 0x02: /* l.srai */
        LOG_DIS("l.srai r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_sari_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    case 0x03: /* l.rori */
        LOG_DIS("l.rori r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_rotri_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_M(DisasContext *dc, uint32_t insn)
{
    uint32_t op0, rd;
    uint32_t K16;
    op0 = extract32(insn, 16, 1);
    rd = extract32(insn, 21, 5);
    K16 = extract32(insn, 0, 16);

    check_r0_write(rd);
    switch (op0) {
    case 0x0: /* l.movhi */
        LOG_DIS("l.movhi r%d, %d\n", rd, K16);
        tcg_gen_movi_tl(cpu_R[rd], (K16 << 16));
        break;

    case 0x1: /* l.macrc */
        LOG_DIS("l.macrc r%d\n", rd);
        tcg_gen_trunc_i64_tl(cpu_R[rd], cpu_mac);
        tcg_gen_movi_i64(cpu_mac, 0);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_comp(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t ra, rb;
    op0 = extract32(insn, 21, 5);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);

    /* unsigned integers */
    tcg_gen_ext32u_tl(cpu_R[ra], cpu_R[ra]);
    tcg_gen_ext32u_tl(cpu_R[rb], cpu_R[rb]);

    switch (op0) {
    case 0x0: /* l.sfeq */
        LOG_DIS("l.sfeq r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1: /* l.sfne */
        LOG_DIS("l.sfne r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x2: /* l.sfgtu */
        LOG_DIS("l.sfgtu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x3: /* l.sfgeu */
        LOG_DIS("l.sfgeu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x4: /* l.sfltu */
        LOG_DIS("l.sfltu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x5: /* l.sfleu */
        LOG_DIS("l.sfleu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xa: /* l.sfgts */
        LOG_DIS("l.sfgts r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xb: /* l.sfges */
        LOG_DIS("l.sfges r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xc: /* l.sflts */
        LOG_DIS("l.sflts r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xd: /* l.sfles */
        LOG_DIS("l.sfles r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_compi(DisasContext *dc, uint32_t insn)
{
    uint32_t op0, ra;
    int32_t I16;
    op0 = extract32(insn, 21, 5);
    ra = extract32(insn, 16, 5);
    I16 = sextract32(insn, 0, 16);

    switch (op0) {
    case 0x0: /* l.sfeqi */
        LOG_DIS("l.sfeqi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x1: /* l.sfnei */
        LOG_DIS("l.sfnei r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x2: /* l.sfgtui */
        LOG_DIS("l.sfgtui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x3: /* l.sfgeui */
        LOG_DIS("l.sfgeui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x4: /* l.sfltui */
        LOG_DIS("l.sfltui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x5: /* l.sfleui */
        LOG_DIS("l.sfleui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xa: /* l.sfgtsi */
        LOG_DIS("l.sfgtsi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xb: /* l.sfgesi */
        LOG_DIS("l.sfgesi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xc: /* l.sfltsi */
        LOG_DIS("l.sfltsi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xd: /* l.sflesi */
        LOG_DIS("l.sflesi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R[ra], I16);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_sys(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t K16;
    op0 = extract32(insn, 16, 10);
    K16 = extract32(insn, 0, 16);

    switch (op0) {
    case 0x000: /* l.sys */
        LOG_DIS("l.sys %d\n", K16);
        tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
        gen_exception(dc, EXCP_SYSCALL);
        dc->base.is_jmp = DISAS_NORETURN;
        break;

    case 0x100: /* l.trap */
        LOG_DIS("l.trap %d\n", K16);
        tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
        gen_exception(dc, EXCP_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        break;

    case 0x300: /* l.csync */
        LOG_DIS("l.csync\n");
        break;

    case 0x200: /* l.msync */
        LOG_DIS("l.msync\n");
        tcg_gen_mb(TCG_MO_ALL);
        break;

    case 0x270: /* l.psync */
        LOG_DIS("l.psync\n");
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_float(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t ra, rb, rd;
    op0 = extract32(insn, 0, 8);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);
    rd = extract32(insn, 21, 5);

    switch (op0) {
    case 0x00: /* lf.add.s */
        LOG_DIS("lf.add.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_add_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x01: /* lf.sub.s */
        LOG_DIS("lf.sub.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_sub_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x02: /* lf.mul.s */
        LOG_DIS("lf.mul.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_mul_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x03: /* lf.div.s */
        LOG_DIS("lf.div.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_div_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x04: /* lf.itof.s */
        LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
        check_r0_write(rd);
        gen_helper_itofs(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x05: /* lf.ftoi.s */
        LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
        check_r0_write(rd);
        gen_helper_ftois(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x06: /* lf.rem.s */
        LOG_DIS("lf.rem.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_rem_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x07: /* lf.madd.s */
        LOG_DIS("lf.madd.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_madd_s(cpu_R[rd], cpu_env, cpu_R[rd],
                                cpu_R[ra], cpu_R[rb]);
        break;

    case 0x08: /* lf.sfeq.s */
        LOG_DIS("lf.sfeq.s r%d, r%d\n", ra, rb);
        gen_helper_float_eq_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x09: /* lf.sfne.s */
        LOG_DIS("lf.sfne.s r%d, r%d\n", ra, rb);
        gen_helper_float_ne_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0a: /* lf.sfgt.s */
        LOG_DIS("lf.sfgt.s r%d, r%d\n", ra, rb);
        gen_helper_float_gt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0b: /* lf.sfge.s */
        LOG_DIS("lf.sfge.s r%d, r%d\n", ra, rb);
        gen_helper_float_ge_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0c: /* lf.sflt.s */
        LOG_DIS("lf.sflt.s r%d, r%d\n", ra, rb);
        gen_helper_float_lt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0d: /* lf.sfle.s */
        LOG_DIS("lf.sfle.s r%d, r%d\n", ra, rb);
        gen_helper_float_le_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    /* Not used yet; enable when we add or64 support.  */
    /*#ifdef TARGET_OPENRISC64
    case 0x10:     lf.add.d
        LOG_DIS("lf.add.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_add_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x11:     lf.sub.d
        LOG_DIS("lf.sub.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_sub_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x12:     lf.mul.d
        LOG_DIS("lf.mul.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_mul_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x13:     lf.div.d
        LOG_DIS("lf.div.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_div_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x14:     lf.itof.d
        LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_itofd(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x15:     lf.ftoi.d
        LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_ftoid(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x16:     lf.rem.d
        LOG_DIS("lf.rem.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_rem_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x17:     lf.madd.d
        LOG_DIS("lf.madd.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_madd_d(cpu_R[rd], cpu_env, cpu_R[rd],
                                cpu_R[ra], cpu_R[rb]);
        break;

    case 0x18:     lf.sfeq.d
        LOG_DIS("lf.sfeq.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_eq_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1a:     lf.sfgt.d
        LOG_DIS("lf.sfgt.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_gt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1b:     lf.sfge.d
        LOG_DIS("lf.sfge.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_ge_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x19:     lf.sfne.d
        LOG_DIS("lf.sfne.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_ne_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1c:     lf.sflt.d
        LOG_DIS("lf.sflt.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_lt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1d:     lf.sfle.d
        LOG_DIS("lf.sfle.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_le_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;
    #endif*/

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void disas_openrisc_insn(DisasContext *dc, OpenRISCCPU *cpu)
{
    uint32_t op0;
    uint32_t insn;
    insn = cpu_ldl_code(&cpu->env, dc->base.pc_next);
    op0 = extract32(insn, 26, 6);

    switch (op0) {
    case 0x06:
        dec_M(dc, insn);
        break;

    case 0x08:
        dec_sys(dc, insn);
        break;

    case 0x2e:
        dec_logic(dc, insn);
        break;

    case 0x2f:
        dec_compi(dc, insn);
        break;

    case 0x31:
        dec_mac(dc, insn);
        break;

    case 0x32:
        dec_float(dc, insn);
        break;

    case 0x38:
        dec_calc(dc, insn);
        break;

    case 0x39:
        dec_comp(dc, insn);
        break;

    default:
        dec_misc(dc, insn);
        break;
    }
}

static void openrisc_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUOpenRISCState *env = cs->env_ptr;
    int bound;

    dc->mem_idx = cpu_mmu_index(env, false);
    dc->tb_flags = dc->base.tb->flags;
    dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void openrisc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
    DisasContext *dc = container_of(db, DisasContext, base);

    /* Allow the TCG optimizer to see that R0 == 0,
       when it's true, which is the common case.  */
    if (dc->tb_flags & TB_FLAGS_R0_0) {
        cpu_R[0] = tcg_const_tl(0);
    } else {
        cpu_R[0] = cpu_R0;
    }
}
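
/* cpu_R[0] is repointed per TB: to a constant-zero temporary when the
   tb_flags say R0 currently holds zero (the common case, letting the
   optimizer fold it away), otherwise to the architectural register
   cpu_R0; check_r0_write above restores the latter on any write.  */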

static void openrisc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, (dc->delayed_branch ? 1 : 0)
                       | (dc->base.num_insns > 1 ? 2 : 0));
}
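
/* These two words come back in restore_state_to_opc (at the end of this
   file) after a fault: data[0] is the pc, data[1] bit 0 the delay-slot
   flag, and bit 1 whether this was not the first insn of the TB, so the
   previous-PC register can be reconstructed as pc - 4.  */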

static bool openrisc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                         const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_DEBUG);
    dc->base.is_jmp = DISAS_NORETURN;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared -- thus we increment the PC here so that the logic
       setting tb->size below does the right thing.  */
    dc->base.pc_next += 4;
    return true;
}

static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);

    disas_openrisc_insn(dc, cpu);
    dc->base.pc_next += 4;

    if (dc->delayed_branch) {
        dc->delayed_branch--;
        if (!dc->delayed_branch) {
            tcg_gen_mov_tl(cpu_pc, jmp_pc);
            tcg_gen_discard_tl(jmp_pc);
            dc->base.is_jmp = DISAS_UPDATE;
        }
    }
}
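
/* After the delay slot, the branch resolves: cpu_pc takes jmp_pc and
   the TB ends with DISAS_UPDATE, since the target is only known
   dynamically.  The discard tells TCG that jmp_pc's value is dead from
   here on, so it need not be written back to the CPU state.  */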

static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
        tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
    }

    tcg_gen_movi_tl(cpu_ppc, dc->base.pc_next - 4);
    if (dc->base.is_jmp == DISAS_NEXT) {
        dc->base.is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    }
    if (unlikely(dc->base.singlestep_enabled)) {
        gen_exception(dc, EXCP_DEBUG);
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 0, dc->base.pc_next);
            break;
        case DISAS_NORETURN:
        case DISAS_TB_JUMP:
            break;
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void openrisc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(s->base.pc_first));
    log_target_disas(cs, s->base.pc_first, s->base.tb->size);
}

static const TranslatorOps openrisc_tr_ops = {
    .init_disas_context = openrisc_tr_init_disas_context,
    .tb_start = openrisc_tr_tb_start,
    .insn_start = openrisc_tr_insn_start,
    .breakpoint_check = openrisc_tr_breakpoint_check,
    .translate_insn = openrisc_tr_translate_insn,
    .tb_stop = openrisc_tr_tb_stop,
    .disas_log = openrisc_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    DisasContext ctx;

    translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb);
}

void openrisc_cpu_dump_state(CPUState *cs, FILE *f,
                             fprintf_function cpu_fprintf,
                             int flags)
{
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    CPUOpenRISCState *env = &cpu->env;
    int i;

    cpu_fprintf(f, "PC=%08x\n", env->pc);
    for (i = 0; i < 32; ++i) {
        cpu_fprintf(f, "R%02d=%08x%c", i, cpu_get_gpr(env, i),
                    (i % 4) == 3 ? '\n' : ' ');
    }
}

void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->dflag = data[1] & 1;
    if (data[1] & 2) {
        env->ppc = env->pc - 4;
    }
}