 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu-common.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define LOG_DIS(str, ...) \
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%08x: " str, dc->pc, ## __VA_ARGS__)

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

typedef struct DisasContext {
    TranslationBlock *tb;
    target_ulong pc;
    uint32_t is_jmp;
    uint32_t mem_idx;
    uint32_t tb_flags;
    uint32_t delayed_branch;
    bool singlestep_enabled;
} DisasContext;

static TCGv_env cpu_env;
static TCGv cpu_sr;
static TCGv cpu_R[32];
static TCGv cpu_R0;
static TCGv cpu_pc;
static TCGv jmp_pc;             /* l.jr/l.jalr temp pc */
static TCGv cpu_ppc;
static TCGv cpu_sr_f;           /* bf/bnf, F flag taken */
static TCGv cpu_sr_cy;          /* carry (unsigned overflow) */
static TCGv cpu_sr_ov;          /* signed overflow */
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;
static TCGv_i32 fpcsr;
static TCGv_i64 cpu_mac;        /* MACHI:MACLO */
static TCGv_i32 cpu_dflag;

#include "exec/gen-icount.h"

void openrisc_translate_init(void)
{
    static const char * const regnames[] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    };
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx->tcg_env = cpu_env;
    cpu_sr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, sr), "sr");
    cpu_dflag = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUOpenRISCState, dflag),
                                       "dflag");
    cpu_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, pc), "pc");
    cpu_ppc = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUOpenRISCState, ppc), "ppc");
    jmp_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
    cpu_sr_f = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUOpenRISCState, sr_f), "sr_f");
    cpu_sr_cy = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_cy), "sr_cy");
    cpu_sr_ov = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_ov), "sr_ov");
    cpu_lock_addr = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUOpenRISCState, lock_addr),
                                       "lock_addr");
    cpu_lock_value = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUOpenRISCState, lock_value),
                                        "lock_value");
    fpcsr = tcg_global_mem_new_i32(cpu_env,
                                   offsetof(CPUOpenRISCState, fpcsr),
                                   "fpcsr");
    cpu_mac = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUOpenRISCState, mac),
                                     "mac");
    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUOpenRISCState,
                                               shadow_gpr[0][i]),
                                      regnames[i]);
    }
    cpu_R0 = cpu_R[0];
}

static void gen_exception(DisasContext *dc, unsigned int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_illegal_exception(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    gen_exception(dc, EXCP_ILLEGAL);
    dc->is_jmp = DISAS_UPDATE;
}

/* Not used yet; enable when or64 support is needed.  */
/*#ifdef TARGET_OPENRISC64
static void check_ob64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OB64S)) {
        gen_illegal_exception(dc);
    }
}

static void check_of64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OF64S)) {
        gen_illegal_exception(dc);
    }
}

static void check_ov64s(DisasContext *dc)
{
    if (!(dc->flags & CPUCFGR_OV64S)) {
        gen_illegal_exception(dc);
    }
}
#endif*/

/* We're about to write to REG.  On the off-chance that the user is
   writing to R0, re-instate the architectural register.  */
#define check_r0_write(reg)             \
    do {                                \
        if (unlikely(reg == 0)) {       \
            cpu_R[0] = cpu_R0;          \
        }                               \
    } while (0)

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
    if (unlikely(dc->singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

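/* Cross-page chaining is never used: once execution leaves the current
   guest page, protections or mappings may differ, so control must return
   through the TB lookup path rather than follow a direct goto_tb link.  */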
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_goto_tb(n);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (dc->singlestep_enabled) {
            gen_exception(dc, EXCP_DEBUG);
        }
        tcg_gen_exit_tb(0);
    }
}

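/* Branches and jumps do not transfer control directly: gen_jump only
   computes the target into jmp_pc, and the main translation loop counts
   delayed_branch down so that the branch is taken after the delay slot
   instruction has been translated.  */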
static void gen_jump(DisasContext *dc, int32_t n26, uint32_t reg, uint32_t op0)
{
    target_ulong tmp_pc = dc->pc + n26 * 4;

    switch (op0) {
    case 0x00: /* l.j */
        tcg_gen_movi_tl(jmp_pc, tmp_pc);
        break;
    case 0x01: /* l.jal */
        tcg_gen_movi_tl(cpu_R[9], dc->pc + 8);
        /* Optimize jal being used to load the PC for PIC.  */
        if (tmp_pc == dc->pc + 8) {
            return;
        }
        tcg_gen_movi_tl(jmp_pc, tmp_pc);
        break;
    case 0x03: /* l.bnf */
    case 0x04: /* l.bf */
        {
            TCGv t_next = tcg_const_tl(dc->pc + 8);
            TCGv t_true = tcg_const_tl(tmp_pc);
            TCGv t_zero = tcg_const_tl(0);

            tcg_gen_movcond_tl(op0 == 0x03 ? TCG_COND_EQ : TCG_COND_NE,
                               jmp_pc, cpu_sr_f, t_zero, t_true, t_next);

            tcg_temp_free(t_next);
            tcg_temp_free(t_true);
            tcg_temp_free(t_zero);
        }
        break;
    case 0x11: /* l.jr */
        tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
        break;
    case 0x12: /* l.jalr */
        tcg_gen_movi_tl(cpu_R[9], (dc->pc + 8));
        tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
        break;
    default:
        gen_illegal_exception(dc);
        break;
    }

    dc->delayed_branch = 2;
}

static void gen_ove_cy(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cy(cpu_env);
    }
}

static void gen_ove_ov(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_ov(cpu_env);
    }
}

static void gen_ove_cyov(DisasContext *dc)
{
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cyov(cpu_env);
    }
}

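/* The arithmetic helpers below keep CY as a 0/1 value and OV with the
   overflow condition in its sign bit.  For addition, add2 with zero high
   parts yields the unsigned carry-out directly, and signed overflow is
   recovered as (res ^ b) & ~(a ^ b): the result changed sign relative to
   an operand even though both operands had the same sign.  The gen_ove_*
   calls then raise the range exception only when SR_OVE is enabled for
   this TB.  */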
static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, srcb, t0);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}

static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, cpu_sr_cy, t0);
    tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, t0);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}

static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv res = tcg_temp_new();

    tcg_gen_sub_tl(res, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_cy, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_ov, res, srcb);
    tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy);
    tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb);

    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}

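/* For l.mul, signed overflow is detected by comparing the high word of
   the double-width product against the sign extension of the low word:
   they match exactly when the product fits.  For l.mulu, CY is set when
   the unsigned high word is nonzero, i.e. when the product does not fit
   in a single register.  */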
static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb);
    tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
    tcg_temp_free(t0);

    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
    gen_ove_ov(dc);
}

static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    tcg_gen_mulu2_tl(dest, cpu_sr_cy, srca, srcb);
    tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0);

    gen_ove_cy(dc);
}

static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_ov);
    tcg_gen_div_tl(dest, srca, t0);
    tcg_temp_free(t0);

    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
    gen_ove_ov(dc);
}

static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1.  */
    tcg_gen_or_tl(t0, srcb, cpu_sr_cy);
    tcg_gen_divu_tl(dest, srca, t0);
    tcg_temp_free(t0);

    gen_ove_cy(dc);
}

static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_ov, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_muls2_i64(cpu_mac, high, t1, t2);
        tcg_gen_sari_i64(t1, cpu_mac, 63);
        tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high);
        tcg_temp_free_i64(high);
        tcg_gen_trunc_i64_tl(cpu_sr_ov, t1);
        tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);

        gen_ove_ov(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_cy, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        tcg_gen_mulu2_i64(cpu_mac, high, t1, t2);
        tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0);
        tcg_gen_trunc_i64_tl(cpu_sr_cy, high);
        tcg_temp_free_i64(high);

        gen_ove_cy(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

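/* The MAC unit accumulates into the 64-bit cpu_mac (MACHI:MACLO).  Signed
   accumulate overflow uses the same sign-bit xor/andc trick as gen_add,
   applied at 64 bits; on a 32-bit target only the upper half of the mask
   carries the indicator, hence the extrh into cpu_sr_ov.  Unsigned
   carry-out is the usual "result < addend" test after the addition.  */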
static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_andc_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}

static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

    /* Note that overflow is only computed during addition stage.  */
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t1);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}

static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_and_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}

static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage.  */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t2);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}

static void gen_lwa(DisasContext *dc, TCGv rd, TCGv ra, int32_t ofs)
{
    TCGv ea = tcg_temp_new();

    tcg_gen_addi_tl(ea, ra, ofs);
    tcg_gen_qemu_ld_tl(rd, ea, dc->mem_idx, MO_TEUL);
    tcg_gen_mov_tl(cpu_lock_addr, ea);
    tcg_gen_mov_tl(cpu_lock_value, rd);
    tcg_temp_free(ea);
}

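/* l.lwa/l.swa emulate load-linked/store-conditional: the load records the
   address and value seen, and the store succeeds only if a cmpxchg at the
   locked address still observes the recorded value.  As with other
   cmpxchg-based LL/SC emulations, a store can in principle succeed
   spuriously if the location was modified and then restored (ABA).  */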
static void gen_swa(DisasContext *dc, int b, TCGv ra, int32_t ofs)
{
    TCGv ea, val;
    TCGLabel *lab_fail, *lab_done;

    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, ra, ofs);

    /* For TB_FLAGS_R0_0, the branch below invalidates the temporary assigned
       to cpu_R[0].  Since l.swa is quite often immediately followed by a
       branch, don't bother reallocating; finish the TB using the "real" R0.
       This also takes care of RB input across the branch.  */
    cpu_R[0] = cpu_R0;

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
    tcg_temp_free(ea);

    val = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
                              cpu_R[b], dc->mem_idx, MO_TEUL);
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
    tcg_temp_free(val);

    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    tcg_gen_movi_tl(cpu_sr_f, 0);

    gen_set_label(lab_done);
    tcg_gen_movi_tl(cpu_lock_addr, -1);
}

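/* Decode for the 0x38 opcode group: op0 is insn[3:0], op1 is insn[9:8]
   and op2 is insn[7:6]; ra, rb and rd are the usual 5-bit register
   fields.  */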
static void dec_calc(DisasContext *dc, uint32_t insn)
{
    uint32_t op0, op1, op2;
    uint32_t ra, rb, rd;
    op0 = extract32(insn, 0, 4);
    op1 = extract32(insn, 8, 2);
    op2 = extract32(insn, 6, 2);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);
    rd = extract32(insn, 21, 5);
    check_r0_write(rd);

    switch (op1) {
    case 0:
        switch (op0) {
        case 0x0: /* l.add */
            LOG_DIS("l.add r%d, r%d, r%d\n", rd, ra, rb);
            gen_add(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x1: /* l.addc */
            LOG_DIS("l.addc r%d, r%d, r%d\n", rd, ra, rb);
            gen_addc(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x2: /* l.sub */
            LOG_DIS("l.sub r%d, r%d, r%d\n", rd, ra, rb);
            gen_sub(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x3: /* l.and */
            LOG_DIS("l.and r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_and_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x4: /* l.or */
            LOG_DIS("l.or r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_or_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x5: /* l.xor */
            LOG_DIS("l.xor r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_xor_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x8:
            switch (op2) {
            case 0: /* l.sll */
                LOG_DIS("l.sll r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_shl_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            case 1: /* l.srl */
                LOG_DIS("l.srl r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_shr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            case 2: /* l.sra */
                LOG_DIS("l.sra r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_sar_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            case 3: /* l.ror */
                LOG_DIS("l.ror r%d, r%d, r%d\n", rd, ra, rb);
                tcg_gen_rotr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
                return;
            }
            break;

        case 0xc:
            switch (op2) {
            case 0: /* l.exths */
                LOG_DIS("l.exths r%d, r%d\n", rd, ra);
                tcg_gen_ext16s_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 1: /* l.extbs */
                LOG_DIS("l.extbs r%d, r%d\n", rd, ra);
                tcg_gen_ext8s_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 2: /* l.exthz */
                LOG_DIS("l.exthz r%d, r%d\n", rd, ra);
                tcg_gen_ext16u_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 3: /* l.extbz */
                LOG_DIS("l.extbz r%d, r%d\n", rd, ra);
                tcg_gen_ext8u_tl(cpu_R[rd], cpu_R[ra]);
                return;
            }
            break;

        case 0xd:
            switch (op2) {
            case 0: /* l.extws */
                LOG_DIS("l.extws r%d, r%d\n", rd, ra);
                tcg_gen_ext32s_tl(cpu_R[rd], cpu_R[ra]);
                return;
            case 1: /* l.extwz */
                LOG_DIS("l.extwz r%d, r%d\n", rd, ra);
                tcg_gen_ext32u_tl(cpu_R[rd], cpu_R[ra]);
                return;
            }
            break;

        case 0xe: /* l.cmov */
            LOG_DIS("l.cmov r%d, r%d, r%d\n", rd, ra, rb);
            {
                TCGv zero = tcg_const_tl(0);
                tcg_gen_movcond_tl(TCG_COND_NE, cpu_R[rd], cpu_sr_f, zero,
                                   cpu_R[ra], cpu_R[rb]);
                tcg_temp_free(zero);
            }
            return;

        case 0xf: /* l.ff1 */
            LOG_DIS("l.ff1 r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_ctzi_tl(cpu_R[rd], cpu_R[ra], -1);
            tcg_gen_addi_tl(cpu_R[rd], cpu_R[rd], 1);
            return;
        }
        break;

    case 1:
        switch (op0) {
        case 0xf: /* l.fl1 */
            LOG_DIS("l.fl1 r%d, r%d, r%d\n", rd, ra, rb);
            tcg_gen_clzi_tl(cpu_R[rd], cpu_R[ra], TARGET_LONG_BITS);
            tcg_gen_subfi_tl(cpu_R[rd], TARGET_LONG_BITS, cpu_R[rd]);
            return;
        }
        break;

    case 2:
        break;

    case 3:
        switch (op0) {
        case 0x6: /* l.mul */
            LOG_DIS("l.mul r%d, r%d, r%d\n", rd, ra, rb);
            gen_mul(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0x7: /* l.muld */
            LOG_DIS("l.muld r%d, r%d\n", ra, rb);
            gen_muld(dc, cpu_R[ra], cpu_R[rb]);
            return;

        case 0x9: /* l.div */
            LOG_DIS("l.div r%d, r%d, r%d\n", rd, ra, rb);
            gen_div(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0xa: /* l.divu */
            LOG_DIS("l.divu r%d, r%d, r%d\n", rd, ra, rb);
            gen_divu(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0xb: /* l.mulu */
            LOG_DIS("l.mulu r%d, r%d, r%d\n", rd, ra, rb);
            gen_mulu(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
            return;

        case 0xc: /* l.muldu */
            LOG_DIS("l.muldu r%d, r%d\n", ra, rb);
            gen_muldu(dc, cpu_R[ra], cpu_R[rb]);
            return;
        }
        break;
    }
    gen_illegal_exception(dc);
}

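/* dec_misc handles the remaining major opcodes (insn[31:26]): jumps and
   branches, loads and stores, immediate ALU ops, and the privileged
   mfspr/mtspr pair.  The load and store cases share common do_load and
   do_store tails, with mop selecting access size and signedness.  */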
static void dec_misc(DisasContext *dc, uint32_t insn)
{
    uint32_t op0, op1;
    uint32_t ra, rb, rd;
    uint32_t L6, K5, K16, K5_11;
    int32_t I16, I5_11, N26;
    TCGMemOp mop;
    TCGv t0;

    op0 = extract32(insn, 26, 6);
    op1 = extract32(insn, 24, 2);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);
    rd = extract32(insn, 21, 5);
    L6 = extract32(insn, 5, 6);
    K5 = extract32(insn, 0, 5);
    K16 = extract32(insn, 0, 16);
    I16 = (int16_t)K16;
    N26 = sextract32(insn, 0, 26);
    K5_11 = (extract32(insn, 21, 5) << 11) | extract32(insn, 0, 11);
    I5_11 = (int16_t)K5_11;

    switch (op0) {
    case 0x00: /* l.j */
        LOG_DIS("l.j %d\n", N26);
        gen_jump(dc, N26, 0, op0);
        break;

    case 0x01: /* l.jal */
        LOG_DIS("l.jal %d\n", N26);
        gen_jump(dc, N26, 0, op0);
        break;

    case 0x03: /* l.bnf */
        LOG_DIS("l.bnf %d\n", N26);
        gen_jump(dc, N26, 0, op0);
        break;

    case 0x04: /* l.bf */
        LOG_DIS("l.bf %d\n", N26);
        gen_jump(dc, N26, 0, op0);
        break;

    case 0x05:
        switch (op1) {
        case 0x01: /* l.nop */
            LOG_DIS("l.nop %d\n", I16);
            break;

        default:
            gen_illegal_exception(dc);
            break;
        }
        break;

    case 0x11: /* l.jr */
        LOG_DIS("l.jr r%d\n", rb);
        gen_jump(dc, 0, rb, op0);
        break;

    case 0x12: /* l.jalr */
        LOG_DIS("l.jalr r%d\n", rb);
        gen_jump(dc, 0, rb, op0);
        break;

    case 0x13: /* l.maci */
        LOG_DIS("l.maci r%d, %d\n", ra, I16);
        t0 = tcg_const_tl(I16);
        gen_mac(dc, cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x09: /* l.rfe */
        LOG_DIS("l.rfe\n");
        {
#if defined(CONFIG_USER_ONLY)
            return;
#else
            if (dc->mem_idx == MMU_USER_IDX) {
                gen_illegal_exception(dc);
                return;
            }
            gen_helper_rfe(cpu_env);
            dc->is_jmp = DISAS_UPDATE;
#endif
        }
        break;

    case 0x1b: /* l.lwa */
        LOG_DIS("l.lwa r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        gen_lwa(dc, cpu_R[rd], cpu_R[ra], I16);
        break;

    case 0x1c: /* l.cust1 */
        LOG_DIS("l.cust1\n");
        break;

    case 0x1d: /* l.cust2 */
        LOG_DIS("l.cust2\n");
        break;

    case 0x1e: /* l.cust3 */
        LOG_DIS("l.cust3\n");
        break;

    case 0x1f: /* l.cust4 */
        LOG_DIS("l.cust4\n");
        break;

    case 0x3c: /* l.cust5 */
        LOG_DIS("l.cust5 r%d, r%d, r%d, %d, %d\n", rd, ra, rb, L6, K5);
        break;

    case 0x3d: /* l.cust6 */
        LOG_DIS("l.cust6\n");
        break;

    case 0x3e: /* l.cust7 */
        LOG_DIS("l.cust7\n");
        break;

    case 0x3f: /* l.cust8 */
        LOG_DIS("l.cust8\n");
        break;

    /* Not used yet; enable when or64 support is needed.  */
    /*#ifdef TARGET_OPENRISC64
    case 0x20:     l.ld
        LOG_DIS("l.ld r%d, r%d, %d\n", rd, ra, I16);
        check_ob64s(dc);
        mop = MO_TEQ;
        goto do_load;
    #endif*/

    case 0x21: /* l.lwz */
        LOG_DIS("l.lwz r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TEUL;
        goto do_load;

    case 0x22: /* l.lws */
        LOG_DIS("l.lws r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TESL;
        goto do_load;

    case 0x23: /* l.lbz */
        LOG_DIS("l.lbz r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_UB;
        goto do_load;

    case 0x24: /* l.lbs */
        LOG_DIS("l.lbs r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_SB;
        goto do_load;

    case 0x25: /* l.lhz */
        LOG_DIS("l.lhz r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TEUW;
        goto do_load;

    case 0x26: /* l.lhs */
        LOG_DIS("l.lhs r%d, r%d, %d\n", rd, ra, I16);
        mop = MO_TESW;
        goto do_load;

    do_load:
        check_r0_write(rd);
        t0 = tcg_temp_new();
        tcg_gen_addi_tl(t0, cpu_R[ra], I16);
        tcg_gen_qemu_ld_tl(cpu_R[rd], t0, dc->mem_idx, mop);
        tcg_temp_free(t0);
        break;

    case 0x27: /* l.addi */
        LOG_DIS("l.addi r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        t0 = tcg_const_tl(I16);
        gen_add(dc, cpu_R[rd], cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x28: /* l.addic */
        LOG_DIS("l.addic r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        t0 = tcg_const_tl(I16);
        gen_addc(dc, cpu_R[rd], cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x29: /* l.andi */
        LOG_DIS("l.andi r%d, r%d, %d\n", rd, ra, K16);
        check_r0_write(rd);
        tcg_gen_andi_tl(cpu_R[rd], cpu_R[ra], K16);
        break;

    case 0x2a: /* l.ori */
        LOG_DIS("l.ori r%d, r%d, %d\n", rd, ra, K16);
        check_r0_write(rd);
        tcg_gen_ori_tl(cpu_R[rd], cpu_R[ra], K16);
        break;

    case 0x2b: /* l.xori */
        LOG_DIS("l.xori r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        tcg_gen_xori_tl(cpu_R[rd], cpu_R[ra], I16);
        break;

    case 0x2c: /* l.muli */
        LOG_DIS("l.muli r%d, r%d, %d\n", rd, ra, I16);
        check_r0_write(rd);
        t0 = tcg_const_tl(I16);
        gen_mul(dc, cpu_R[rd], cpu_R[ra], t0);
        tcg_temp_free(t0);
        break;

    case 0x2d: /* l.mfspr */
        LOG_DIS("l.mfspr r%d, r%d, %d\n", rd, ra, K16);
        check_r0_write(rd);
        {
#if defined(CONFIG_USER_ONLY)
            gen_illegal_exception(dc);
#else
            TCGv_i32 ti = tcg_const_i32(K16);
            if (dc->mem_idx == MMU_USER_IDX) {
                gen_illegal_exception(dc);
                return;
            }
            gen_helper_mfspr(cpu_R[rd], cpu_env, cpu_R[rd], cpu_R[ra], ti);
            tcg_temp_free_i32(ti);
#endif
        }
        break;

    case 0x30: /* l.mtspr */
        LOG_DIS("l.mtspr r%d, r%d, %d\n", ra, rb, K5_11);
        {
#if defined(CONFIG_USER_ONLY)
            gen_illegal_exception(dc);
#else
            TCGv_i32 im = tcg_const_i32(K5_11);
            if (dc->mem_idx == MMU_USER_IDX) {
                gen_illegal_exception(dc);
                return;
            }
            gen_helper_mtspr(cpu_env, cpu_R[ra], cpu_R[rb], im);
            tcg_temp_free_i32(im);
#endif
        }
        break;

    case 0x33: /* l.swa */
        LOG_DIS("l.swa r%d, r%d, %d\n", ra, rb, I5_11);
        gen_swa(dc, rb, cpu_R[ra], I5_11);
        break;

    /* Not used yet; enable when or64 support is needed.  */
    /*#ifdef TARGET_OPENRISC64
    case 0x34:     l.sd
        LOG_DIS("l.sd r%d, r%d, %d\n", ra, rb, I5_11);
        check_ob64s(dc);
        mop = MO_TEQ;
        goto do_store;
    #endif*/

    case 0x35: /* l.sw */
        LOG_DIS("l.sw r%d, r%d, %d\n", ra, rb, I5_11);
        mop = MO_TEUL;
        goto do_store;

    case 0x36: /* l.sb */
        LOG_DIS("l.sb r%d, r%d, %d\n", ra, rb, I5_11);
        mop = MO_UB;
        goto do_store;

    case 0x37: /* l.sh */
        LOG_DIS("l.sh r%d, r%d, %d\n", ra, rb, I5_11);
        mop = MO_TEUW;
        goto do_store;

    do_store:
        {
            TCGv t0 = tcg_temp_new();
            tcg_gen_addi_tl(t0, cpu_R[ra], I5_11);
            tcg_gen_qemu_st_tl(cpu_R[rb], t0, dc->mem_idx, mop);
            tcg_temp_free(t0);
        }
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_mac(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t ra, rb;
    op0 = extract32(insn, 0, 4);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);

    switch (op0) {
    case 0x0001: /* l.mac */
        LOG_DIS("l.mac r%d, r%d\n", ra, rb);
        gen_mac(dc, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0002: /* l.msb */
        LOG_DIS("l.msb r%d, r%d\n", ra, rb);
        gen_msb(dc, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0003: /* l.macu */
        LOG_DIS("l.macu r%d, r%d\n", ra, rb);
        gen_macu(dc, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0004: /* l.msbu */
        LOG_DIS("l.msbu r%d, r%d\n", ra, rb);
        gen_msbu(dc, cpu_R[ra], cpu_R[rb]);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

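/* Immediate shifts and rotates: the 6-bit L6 field is masked down to the
   valid range (S6 = L6 & (TARGET_LONG_BITS - 1)) so that TCG never sees
   an out-of-range shift count, whose result would be undefined.  */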
static void dec_logic(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t rd, ra, L6, S6;
    op0 = extract32(insn, 6, 2);
    rd = extract32(insn, 21, 5);
    ra = extract32(insn, 16, 5);
    L6 = extract32(insn, 0, 6);
    S6 = L6 & (TARGET_LONG_BITS - 1);

    check_r0_write(rd);
    switch (op0) {
    case 0x00: /* l.slli */
        LOG_DIS("l.slli r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_shli_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    case 0x01: /* l.srli */
        LOG_DIS("l.srli r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_shri_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    case 0x02: /* l.srai */
        LOG_DIS("l.srai r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_sari_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    case 0x03: /* l.rori */
        LOG_DIS("l.rori r%d, r%d, %d\n", rd, ra, L6);
        tcg_gen_rotri_tl(cpu_R[rd], cpu_R[ra], S6);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_M(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t rd;
    uint32_t K16;
    op0 = extract32(insn, 16, 1);
    rd = extract32(insn, 21, 5);
    K16 = extract32(insn, 0, 16);

    check_r0_write(rd);
    switch (op0) {
    case 0x0: /* l.movhi */
        LOG_DIS("l.movhi r%d, %d\n", rd, K16);
        tcg_gen_movi_tl(cpu_R[rd], (K16 << 16));
        break;

    case 0x1: /* l.macrc */
        LOG_DIS("l.macrc r%d\n", rd);
        tcg_gen_trunc_i64_tl(cpu_R[rd], cpu_mac);
        tcg_gen_movi_i64(cpu_mac, 0);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

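/* The set-flag comparisons write only the F flag (cpu_sr_f), which is
   later consumed by l.bf/l.bnf and l.cmov.  */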
static void dec_comp(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t ra, rb;

    op0 = extract32(insn, 21, 5);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);

    /* unsigned integers */
    tcg_gen_ext32u_tl(cpu_R[ra], cpu_R[ra]);
    tcg_gen_ext32u_tl(cpu_R[rb], cpu_R[rb]);

    switch (op0) {
    case 0x0: /* l.sfeq */
        LOG_DIS("l.sfeq r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1: /* l.sfne */
        LOG_DIS("l.sfne r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x2: /* l.sfgtu */
        LOG_DIS("l.sfgtu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x3: /* l.sfgeu */
        LOG_DIS("l.sfgeu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x4: /* l.sfltu */
        LOG_DIS("l.sfltu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x5: /* l.sfleu */
        LOG_DIS("l.sfleu r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xa: /* l.sfgts */
        LOG_DIS("l.sfgts r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xb: /* l.sfges */
        LOG_DIS("l.sfges r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xc: /* l.sflts */
        LOG_DIS("l.sflts r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    case 0xd: /* l.sfles */
        LOG_DIS("l.sfles r%d, r%d\n", ra, rb);
        tcg_gen_setcond_tl(TCG_COND_LE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_compi(DisasContext *dc, uint32_t insn)
{
    uint32_t op0, ra;
    int32_t I16;

    op0 = extract32(insn, 21, 5);
    ra = extract32(insn, 16, 5);
    I16 = sextract32(insn, 0, 16);

    switch (op0) {
    case 0x0: /* l.sfeqi */
        LOG_DIS("l.sfeqi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x1: /* l.sfnei */
        LOG_DIS("l.sfnei r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x2: /* l.sfgtui */
        LOG_DIS("l.sfgtui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x3: /* l.sfgeui */
        LOG_DIS("l.sfgeui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x4: /* l.sfltui */
        LOG_DIS("l.sfltui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0x5: /* l.sfleui */
        LOG_DIS("l.sfleui r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xa: /* l.sfgtsi */
        LOG_DIS("l.sfgtsi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xb: /* l.sfgesi */
        LOG_DIS("l.sfgesi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xc: /* l.sfltsi */
        LOG_DIS("l.sfltsi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R[ra], I16);
        break;

    case 0xd: /* l.sflesi */
        LOG_DIS("l.sflesi r%d, %d\n", ra, I16);
        tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R[ra], I16);
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

static void dec_sys(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t K16;

    op0 = extract32(insn, 16, 10);
    K16 = extract32(insn, 0, 16);

    switch (op0) {
    case 0x000: /* l.sys */
        LOG_DIS("l.sys %d\n", K16);
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        gen_exception(dc, EXCP_SYSCALL);
        dc->is_jmp = DISAS_UPDATE;
        break;

    case 0x100: /* l.trap */
        LOG_DIS("l.trap %d\n", K16);
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        gen_exception(dc, EXCP_TRAP);
        break;

    case 0x300: /* l.csync */
        LOG_DIS("l.csync\n");
        break;

    case 0x200: /* l.msync */
        LOG_DIS("l.msync\n");
        tcg_gen_mb(TCG_MO_ALL);
        break;

    case 0x270: /* l.psync */
        LOG_DIS("l.psync\n");
        break;

    default:
        gen_illegal_exception(dc);
        break;
    }
}

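/* Single-precision FP ops are emitted as calls to helpers, which receive
   cpu_env so they can update floating-point state such as fpcsr; the
   compare forms write the F flag like the integer l.sf* instructions.  */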
static void dec_float(DisasContext *dc, uint32_t insn)
{
    uint32_t op0;
    uint32_t ra, rb, rd;
    op0 = extract32(insn, 0, 8);
    ra = extract32(insn, 16, 5);
    rb = extract32(insn, 11, 5);
    rd = extract32(insn, 21, 5);

    switch (op0) {
    case 0x00: /* lf.add.s */
        LOG_DIS("lf.add.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_add_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x01: /* lf.sub.s */
        LOG_DIS("lf.sub.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_sub_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x02: /* lf.mul.s */
        LOG_DIS("lf.mul.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_mul_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x03: /* lf.div.s */
        LOG_DIS("lf.div.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_div_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x04: /* lf.itof.s */
        LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
        check_r0_write(rd);
        gen_helper_itofs(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x05: /* lf.ftoi.s */
        LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
        check_r0_write(rd);
        gen_helper_ftois(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x06: /* lf.rem.s */
        LOG_DIS("lf.rem.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_rem_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x07: /* lf.madd.s */
        LOG_DIS("lf.madd.s r%d, r%d, r%d\n", rd, ra, rb);
        check_r0_write(rd);
        gen_helper_float_madd_s(cpu_R[rd], cpu_env, cpu_R[rd],
                                cpu_R[ra], cpu_R[rb]);
        break;

    case 0x08: /* lf.sfeq.s */
        LOG_DIS("lf.sfeq.s r%d, r%d\n", ra, rb);
        gen_helper_float_eq_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x09: /* lf.sfne.s */
        LOG_DIS("lf.sfne.s r%d, r%d\n", ra, rb);
        gen_helper_float_ne_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0a: /* lf.sfgt.s */
        LOG_DIS("lf.sfgt.s r%d, r%d\n", ra, rb);
        gen_helper_float_gt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0b: /* lf.sfge.s */
        LOG_DIS("lf.sfge.s r%d, r%d\n", ra, rb);
        gen_helper_float_ge_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0c: /* lf.sflt.s */
        LOG_DIS("lf.sflt.s r%d, r%d\n", ra, rb);
        gen_helper_float_lt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x0d: /* lf.sfle.s */
        LOG_DIS("lf.sfle.s r%d, r%d\n", ra, rb);
        gen_helper_float_le_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    /* Not used yet; enable when or64 support is needed.  */
    /*#ifdef TARGET_OPENRISC64
    case 0x10:     lf.add.d
        LOG_DIS("lf.add.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_add_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x11:     lf.sub.d
        LOG_DIS("lf.sub.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_sub_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x12:     lf.mul.d
        LOG_DIS("lf.mul.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_mul_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x13:     lf.div.d
        LOG_DIS("lf.div.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_div_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x14:     lf.itof.d
        LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_itofd(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x15:     lf.ftoi.d
        LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_ftoid(cpu_R[rd], cpu_env, cpu_R[ra]);
        break;

    case 0x16:     lf.rem.d
        LOG_DIS("lf.rem.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_rem_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x17:     lf.madd.d
        LOG_DIS("lf.madd.d r%d, r%d, r%d\n", rd, ra, rb);
        check_of64s(dc);
        check_r0_write(rd);
        gen_helper_float_madd_d(cpu_R[rd], cpu_env, cpu_R[rd],
                                cpu_R[ra], cpu_R[rb]);
        break;

    case 0x18:     lf.sfeq.d
        LOG_DIS("lf.sfeq.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_eq_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1a:     lf.sfgt.d
        LOG_DIS("lf.sfgt.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_gt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1b:     lf.sfge.d
        LOG_DIS("lf.sfge.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_ge_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x19:     lf.sfne.d
        LOG_DIS("lf.sfne.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_ne_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1c:     lf.sflt.d
        LOG_DIS("lf.sflt.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_lt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;

    case 0x1d:     lf.sfle.d
        LOG_DIS("lf.sfle.d r%d, r%d\n", ra, rb);
        check_of64s(dc);
        gen_helper_float_le_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
        break;
    #endif*/

    default:
        gen_illegal_exception(dc);
        break;
    }
}

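/* Top-level decode: dispatch on the 6-bit major opcode in insn[31:26];
   anything without a dedicated group falls through to dec_misc.  */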
static void disas_openrisc_insn(DisasContext *dc, OpenRISCCPU *cpu)
{
    uint32_t op0;
    uint32_t insn;
    insn = cpu_ldl_code(&cpu->env, dc->pc);
    op0 = extract32(insn, 26, 6);
    switch (op0) {
    case 0x06:
        dec_M(dc, insn);
        break;

    case 0x08:
        dec_sys(dc, insn);
        break;

    case 0x2e:
        dec_logic(dc, insn);
        break;

    case 0x2f:
        dec_compi(dc, insn);
        break;

    case 0x31:
        dec_mac(dc, insn);
        break;

    case 0x32:
        dec_float(dc, insn);
        break;

    case 0x38:
        dec_calc(dc, insn);
        break;

    case 0x39:
        dec_comp(dc, insn);
        break;

    default:
        dec_misc(dc, insn);
        break;
    }
}

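/* Translate one TB.  Each tcg_gen_insn_start records the PC plus two flag
   bits: bit 0 marks an instruction sitting in a delay slot and bit 1 marks
   any instruction after the first; restore_state_to_opc uses these to
   rebuild dflag and ppc when unwinding at an exception.  */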
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUOpenRISCState *env = cs->env_ptr;
    OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
    struct DisasContext ctx, *dc = &ctx;
    uint32_t pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->mem_idx = cpu_mmu_index(&cpu->env, false);
    dc->tb_flags = tb->flags;
    dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
    dc->singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;

    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
    }

    gen_tb_start(tb);

    /* Allow the TCG optimizer to see that R0 == 0 when it's true,
       which is the common case.  */
    if (dc->tb_flags & TB_FLAGS_R0_0) {
        cpu_R[0] = tcg_const_tl(0);
    } else {
        cpu_R[0] = cpu_R0;
    }

    do {
        tcg_gen_insn_start(dc->pc, (dc->delayed_branch ? 1 : 0)
                           | (num_insns ? 2 : 0));
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
            gen_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }
        disas_openrisc_insn(dc, cpu);
        dc->pc = dc->pc + 4;

        /* delay slot */
        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                tcg_gen_mov_tl(cpu_pc, jmp_pc);
                tcg_gen_discard_tl(jmp_pc);
                dc->is_jmp = DISAS_UPDATE;
                break;
            }
        }
    } while (!dc->is_jmp
             && !tcg_op_buf_full()
             && !cs->singlestep_enabled
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    if (tb_cflags(tb) & CF_LAST_IO) {
        gen_io_end();
    }

    if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
        tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
    }

    tcg_gen_movi_tl(cpu_ppc, dc->pc - 4);
    if (dc->is_jmp == DISAS_NEXT) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_pc, dc->pc);
    }
    if (unlikely(cs->singlestep_enabled)) {
        gen_exception(dc, EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 0, dc->pc);
            break;
        default:
        case DISAS_JUMP:
            break;
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }

    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        log_target_disas(cs, pc_start, tb->size, 0);
        qemu_log("\n");
        qemu_log_unlock();
    }
}

void openrisc_cpu_dump_state(CPUState *cs, FILE *f,
                             fprintf_function cpu_fprintf,
                             int flags)
{
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    CPUOpenRISCState *env = &cpu->env;
    int i;

    cpu_fprintf(f, "PC=%08x\n", env->pc);
    for (i = 0; i < 32; ++i) {
        cpu_fprintf(f, "R%02d=%08x%c", i, cpu_get_gpr(env, i),
                    (i % 4) == 3 ? '\n' : ' ');
    }
}

void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->dflag = data[1] & 1;
    if (data[1] & 2) {
        env->ppc = env->pc - 4;
    }
}