 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "disas/disas.h"
#include "qemu-common.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/gen-icount.h"
#include "trace-tcg.h"
/* is_jmp field values */
#define DISAS_EXIT DISAS_TARGET_0 /* force exit to main loop */
#define DISAS_JUMP DISAS_TARGET_1 /* exit via jmp_pc/jmp_pc_imm */
typedef struct DisasContext {
    DisasContextBase base;
    uint32_t delayed_branch;
    /* If not -1, jmp_pc contains this value and so is a direct jump. */
    target_ulong jmp_pc_imm;
static inline bool is_user(DisasContext *dc)
#ifdef CONFIG_USER_ONLY
    return !(dc->tb_flags & TB_FLAGS_SM);
/* Include the auto-generated decoder. */
#include "decode.inc.c"
static TCGv cpu_R[32];
static TCGv jmp_pc; /* l.jr/l.jalr temp pc */
static TCGv cpu_sr_f; /* bf/bnf, F flag taken */
static TCGv cpu_sr_cy; /* carry (unsigned overflow) */
static TCGv cpu_sr_ov; /* signed overflow */
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;
static TCGv_i32 fpcsr;
static TCGv_i64 cpu_mac; /* MACHI:MACLO */
static TCGv_i32 cpu_dflag;
void openrisc_translate_init(void)
    static const char * const regnames[] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    cpu_sr = tcg_global_mem_new(cpu_env,
        offsetof(CPUOpenRISCState, sr), "sr");
    cpu_dflag = tcg_global_mem_new_i32(cpu_env,
        offsetof(CPUOpenRISCState, dflag),
    cpu_pc = tcg_global_mem_new(cpu_env,
        offsetof(CPUOpenRISCState, pc), "pc");
    cpu_ppc = tcg_global_mem_new(cpu_env,
        offsetof(CPUOpenRISCState, ppc), "ppc");
    jmp_pc = tcg_global_mem_new(cpu_env,
        offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
    cpu_sr_f = tcg_global_mem_new(cpu_env,
        offsetof(CPUOpenRISCState, sr_f), "sr_f");
    cpu_sr_cy = tcg_global_mem_new(cpu_env,
        offsetof(CPUOpenRISCState, sr_cy), "sr_cy");
    cpu_sr_ov = tcg_global_mem_new(cpu_env,
        offsetof(CPUOpenRISCState, sr_ov), "sr_ov");
    cpu_lock_addr = tcg_global_mem_new(cpu_env,
        offsetof(CPUOpenRISCState, lock_addr),
    cpu_lock_value = tcg_global_mem_new(cpu_env,
        offsetof(CPUOpenRISCState, lock_value),
    fpcsr = tcg_global_mem_new_i32(cpu_env,
        offsetof(CPUOpenRISCState, fpcsr),
    cpu_mac = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUOpenRISCState, mac),
    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUOpenRISCState,
static void gen_exception(DisasContext *dc, unsigned int excp)
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
static void gen_illegal_exception(DisasContext *dc)
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_ILLEGAL);
    dc->base.is_jmp = DISAS_NORETURN;
/* Not used yet; enable it when we need or64. */
/*#ifdef TARGET_OPENRISC64
static void check_ob64s(DisasContext *dc)
    if (!(dc->flags & CPUCFGR_OB64S)) {
        gen_illegal_exception(dc);
static void check_of64s(DisasContext *dc)
    if (!(dc->flags & CPUCFGR_OF64S)) {
        gen_illegal_exception(dc);
static void check_ov64s(DisasContext *dc)
    if (!(dc->flags & CPUCFGR_OV64S)) {
        gen_illegal_exception(dc);
/* We're about to write to REG. On the off-chance that the user is
   writing to R0, re-instate the architectural register. */
#define check_r0_write(reg) \
        if (unlikely(reg == 0)) { \
static void gen_ove_cy(DisasContext *dc)
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cy(cpu_env);
static void gen_ove_ov(DisasContext *dc)
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_ov(cpu_env);
static void gen_ove_cyov(DisasContext *dc)
    if (dc->tb_flags & SR_OVE) {
        gen_helper_ove_cyov(cpu_env);
static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();
    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, srcb, t0);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
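    /*
     * CY is the carry-out of the add2 above.  Signed overflow occurs when
     * the operands have the same sign but the result's sign differs,
     * i.e. OV = (res ^ srcb) & ~(srca ^ srcb).
     */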
    tcg_gen_mov_tl(dest, res);
static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();
    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, cpu_sr_cy, t0);
    tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, t0);
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_gen_mov_tl(dest, res);
static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
    TCGv res = tcg_temp_new();
    tcg_gen_sub_tl(res, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_cy, srca, srcb);
    tcg_gen_xor_tl(cpu_sr_ov, res, srca);
    tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy);
    tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb);
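    /*
     * For subtraction, signed overflow occurs when the operands differ in
     * sign and the result's sign differs from srca, i.e.
     * OV = (srca ^ srcb) & (res ^ srca).  CY is the borrow: srca < srcb
     * as unsigned values.
     */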
    tcg_gen_mov_tl(dest, res);
static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
    TCGv t0 = tcg_temp_new();
    tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb);
    tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
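    /*
     * muls2 produces the full double-width signed product.  The multiply
     * overflowed iff the high half is not the sign-extension of the low
     * half; the setcond result is negated so cpu_sr_ov ends up 0 or -1
     * rather than 0 or 1.
     */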
static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
    tcg_gen_mulu2_tl(dest, cpu_sr_cy, srca, srcb);
    tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0);
static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
    TCGv t0 = tcg_temp_new();
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1. */
    tcg_gen_or_tl(t0, srcb, cpu_sr_ov);
    tcg_gen_div_tl(dest, srca, t0);
    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
    TCGv t0 = tcg_temp_new();
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1. */
    tcg_gen_or_tl(t0, srcb, cpu_sr_cy);
    tcg_gen_divu_tl(dest, srca, t0);
static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_ov, 0);
        TCGv_i64 high = tcg_temp_new_i64();
        tcg_gen_muls2_i64(cpu_mac, high, t1, t2);
        tcg_gen_sari_i64(t1, cpu_mac, 63);
        tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high);
        tcg_temp_free_i64(high);
        tcg_gen_trunc_i64_tl(cpu_sr_ov, t1);
        tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_cy, 0);
        TCGv_i64 high = tcg_temp_new_i64();
        tcg_gen_mulu2_i64(cpu_mac, high, t1, t2);
        tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0);
        tcg_gen_trunc_i64_tl(cpu_sr_cy, high);
        tcg_temp_free_i64(high);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);
    /* Note that overflow is only computed during the addition stage. */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_andc_i64(t1, t1, t2);
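    /*
     * Signed overflow of the 64-bit accumulate: the old accumulator and
     * the product had the same sign, but the sum's sign changed, i.e.
     * OV = (product ^ sum) & ~(old_mac ^ product).
     */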
    tcg_temp_free_i64(t2);
#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
    tcg_gen_mov_i64(cpu_sr_ov, t1);
    tcg_temp_free_i64(t1);
static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);
    /* Note that overflow is only computed during the addition stage. */
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1);
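    /* Unsigned carry out of the 64-bit accumulate: the sum wrapped iff it
       is now unsigned-less-than the addend. */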
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t1);
    tcg_temp_free_i64(t1);
static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb)
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);
    /* Note that overflow is only computed during the subtraction stage. */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_andc_i64(t1, t2, t1);
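    /*
     * Signed overflow of the 64-bit subtract: the old accumulator and the
     * product differ in sign, and the difference no longer carries the old
     * accumulator's sign, i.e. OV = (old_mac ^ product) & ~(product ^ diff).
     */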
    tcg_temp_free_i64(t2);
#if TARGET_LONG_BITS == 32
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
    tcg_gen_mov_i64(cpu_sr_ov, t1);
    tcg_temp_free_i64(t1);
static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);
    /* Note that overflow is only computed during the subtraction stage. */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t2);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);
static bool trans_l_add(DisasContext *dc, arg_dab *a)
    check_r0_write(a->d);
    gen_add(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_addc(DisasContext *dc, arg_dab *a)
    check_r0_write(a->d);
    gen_addc(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_sub(DisasContext *dc, arg_dab *a)
    check_r0_write(a->d);
    gen_sub(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_and(DisasContext *dc, arg_dab *a)
    check_r0_write(a->d);
    tcg_gen_and_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_or(DisasContext *dc, arg_dab *a)
    check_r0_write(a->d);
    tcg_gen_or_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_xor(DisasContext *dc, arg_dab *a)
    check_r0_write(a->d);
    tcg_gen_xor_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_sll(DisasContext *dc, arg_dab *a)
    check_r0_write(a->d);
    tcg_gen_shl_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_srl(DisasContext *dc, arg_dab *a)
    check_r0_write(a->d);
    tcg_gen_shr_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_sra(DisasContext *dc, arg_dab *a)
    check_r0_write(a->d);
    tcg_gen_sar_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_ror(DisasContext *dc, arg_dab *a)
    check_r0_write(a->d);
    tcg_gen_rotr_tl(cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_exths(DisasContext *dc, arg_da *a)
    check_r0_write(a->d);
    tcg_gen_ext16s_tl(cpu_R[a->d], cpu_R[a->a]);
static bool trans_l_extbs(DisasContext *dc, arg_da *a)
    check_r0_write(a->d);
    tcg_gen_ext8s_tl(cpu_R[a->d], cpu_R[a->a]);
static bool trans_l_exthz(DisasContext *dc, arg_da *a)
    check_r0_write(a->d);
    tcg_gen_ext16u_tl(cpu_R[a->d], cpu_R[a->a]);
static bool trans_l_extbz(DisasContext *dc, arg_da *a)
    check_r0_write(a->d);
    tcg_gen_ext8u_tl(cpu_R[a->d], cpu_R[a->a]);
static bool trans_l_cmov(DisasContext *dc, arg_dab *a)
    check_r0_write(a->d);
    zero = tcg_const_tl(0);
    tcg_gen_movcond_tl(TCG_COND_NE, cpu_R[a->d], cpu_sr_f, zero,
                       cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_ff1(DisasContext *dc, arg_da *a)
    check_r0_write(a->d);
    tcg_gen_ctzi_tl(cpu_R[a->d], cpu_R[a->a], -1);
    tcg_gen_addi_tl(cpu_R[a->d], cpu_R[a->d], 1);
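    /* l.ff1 returns the 1-based position of the least significant set bit,
       or 0 when the source is zero: ctz with a fallback of -1, plus one. */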
static bool trans_l_fl1(DisasContext *dc, arg_da *a)
    check_r0_write(a->d);
    tcg_gen_clzi_tl(cpu_R[a->d], cpu_R[a->a], TARGET_LONG_BITS);
    tcg_gen_subfi_tl(cpu_R[a->d], TARGET_LONG_BITS, cpu_R[a->d]);
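    /* l.fl1 returns the 1-based position of the most significant set bit,
       or 0 when the source is zero: TARGET_LONG_BITS minus the leading
       zero count, where clz defaults to TARGET_LONG_BITS for zero. */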
static bool trans_l_mul(DisasContext *dc, arg_dab *a)
    check_r0_write(a->d);
    gen_mul(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_mulu(DisasContext *dc, arg_dab *a)
    check_r0_write(a->d);
    gen_mulu(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_div(DisasContext *dc, arg_dab *a)
    check_r0_write(a->d);
    gen_div(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_divu(DisasContext *dc, arg_dab *a)
    check_r0_write(a->d);
    gen_divu(dc, cpu_R[a->d], cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_muld(DisasContext *dc, arg_ab *a)
    gen_muld(dc, cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_muldu(DisasContext *dc, arg_ab *a)
    gen_muldu(dc, cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_j(DisasContext *dc, arg_l_j *a)
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
    tcg_gen_movi_tl(jmp_pc, tmp_pc);
    dc->jmp_pc_imm = tmp_pc;
    dc->delayed_branch = 2;
static bool trans_l_jal(DisasContext *dc, arg_l_jal *a)
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
    target_ulong ret_pc = dc->base.pc_next + 8;
    tcg_gen_movi_tl(cpu_R[9], ret_pc);
    /* Optimize jal being used to load the PC for PIC. */
    if (tmp_pc != ret_pc) {
        tcg_gen_movi_tl(jmp_pc, tmp_pc);
        dc->jmp_pc_imm = tmp_pc;
        dc->delayed_branch = 2;
static void do_bf(DisasContext *dc, arg_l_bf *a, TCGCond cond)
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
    TCGv t_next = tcg_const_tl(dc->base.pc_next + 8);
    TCGv t_true = tcg_const_tl(tmp_pc);
    TCGv t_zero = tcg_const_tl(0);
    tcg_gen_movcond_tl(cond, jmp_pc, cpu_sr_f, t_zero, t_true, t_next);
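    /* The branch is not resolved here.  movcond parks either the taken
       target or the fall-through address (pc + 8) in jmp_pc; the actual
       transfer happens after the delay slot, via DISAS_JUMP in tb_stop. */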
    tcg_temp_free(t_next);
    tcg_temp_free(t_true);
    tcg_temp_free(t_zero);
    dc->delayed_branch = 2;
static bool trans_l_bf(DisasContext *dc, arg_l_bf *a)
    do_bf(dc, a, TCG_COND_NE);
static bool trans_l_bnf(DisasContext *dc, arg_l_bf *a)
    do_bf(dc, a, TCG_COND_EQ);
static bool trans_l_jr(DisasContext *dc, arg_l_jr *a)
    tcg_gen_mov_tl(jmp_pc, cpu_R[a->b]);
    dc->delayed_branch = 2;
static bool trans_l_jalr(DisasContext *dc, arg_l_jalr *a)
    tcg_gen_mov_tl(jmp_pc, cpu_R[a->b]);
    tcg_gen_movi_tl(cpu_R[9], dc->base.pc_next + 8);
    dc->delayed_branch = 2;
static bool trans_l_lwa(DisasContext *dc, arg_load *a)
    check_r0_write(a->d);
    tcg_gen_addi_tl(ea, cpu_R[a->a], a->i);
    tcg_gen_qemu_ld_tl(cpu_R[a->d], ea, dc->mem_idx, MO_TEUL);
    tcg_gen_mov_tl(cpu_lock_addr, ea);
    tcg_gen_mov_tl(cpu_lock_value, cpu_R[a->d]);
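    /* Record the reservation: the address and the value just loaded are
       what a later l.swa will compare against. */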
static void do_load(DisasContext *dc, arg_load *a, TCGMemOp mop)
    check_r0_write(a->d);
    tcg_gen_addi_tl(ea, cpu_R[a->a], a->i);
    tcg_gen_qemu_ld_tl(cpu_R[a->d], ea, dc->mem_idx, mop);
static bool trans_l_lwz(DisasContext *dc, arg_load *a)
    do_load(dc, a, MO_TEUL);
static bool trans_l_lws(DisasContext *dc, arg_load *a)
    do_load(dc, a, MO_TESL);
static bool trans_l_lbz(DisasContext *dc, arg_load *a)
    do_load(dc, a, MO_UB);
static bool trans_l_lbs(DisasContext *dc, arg_load *a)
    do_load(dc, a, MO_SB);
static bool trans_l_lhz(DisasContext *dc, arg_load *a)
    do_load(dc, a, MO_TEUW);
static bool trans_l_lhs(DisasContext *dc, arg_load *a)
    do_load(dc, a, MO_TESW);
static bool trans_l_swa(DisasContext *dc, arg_store *a)
    TCGLabel *lab_fail, *lab_done;
    tcg_gen_addi_tl(ea, cpu_R[a->a], a->i);
    /* For TB_FLAGS_R0_0, the branch below invalidates the temporary assigned
       to cpu_R[0]. Since l.swa is quite often immediately followed by a
       branch, don't bother reallocating; finish the TB using the "real" R0.
       This also takes care of RB input across the branch. */
    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
    val = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
                              cpu_R[a->b], dc->mem_idx, MO_TEUL);
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
    tcg_gen_br(lab_done);
    gen_set_label(lab_fail);
    tcg_gen_movi_tl(cpu_sr_f, 0);
    gen_set_label(lab_done);
    tcg_gen_movi_tl(cpu_lock_addr, -1);
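    /*
     * Together with trans_l_lwa above, this emulates the load-linked /
     * store-conditional pair with a compare-and-swap: the store succeeds
     * (SR[F] set) only if the address still matches the reservation and
     * memory still holds the value l.lwa saw.  A typical guest retry loop
     * (illustrative only) looks like:
     *
     *   1:  l.lwa  r3, 0(r4)
     *       l.add  r3, r3, r5
     *       l.swa  0(r4), r3
     *       l.bnf  1b
     *        l.nop
     */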
static void do_store(DisasContext *dc, arg_store *a, TCGMemOp mop)
    TCGv t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[a->a], a->i);
    tcg_gen_qemu_st_tl(cpu_R[a->b], t0, dc->mem_idx, mop);
static bool trans_l_sw(DisasContext *dc, arg_store *a)
    do_store(dc, a, MO_TEUL);
static bool trans_l_sb(DisasContext *dc, arg_store *a)
    do_store(dc, a, MO_UB);
static bool trans_l_sh(DisasContext *dc, arg_store *a)
    do_store(dc, a, MO_TEUW);
static bool trans_l_nop(DisasContext *dc, arg_l_nop *a)
static bool trans_l_addi(DisasContext *dc, arg_rri *a)
    check_r0_write(a->d);
    t0 = tcg_const_tl(a->i);
    gen_add(dc, cpu_R[a->d], cpu_R[a->a], t0);
static bool trans_l_addic(DisasContext *dc, arg_rri *a)
    check_r0_write(a->d);
    t0 = tcg_const_tl(a->i);
    gen_addc(dc, cpu_R[a->d], cpu_R[a->a], t0);
static bool trans_l_muli(DisasContext *dc, arg_rri *a)
    check_r0_write(a->d);
    t0 = tcg_const_tl(a->i);
    gen_mul(dc, cpu_R[a->d], cpu_R[a->a], t0);
static bool trans_l_maci(DisasContext *dc, arg_l_maci *a)
    t0 = tcg_const_tl(a->i);
    gen_mac(dc, cpu_R[a->a], t0);
static bool trans_l_andi(DisasContext *dc, arg_rrk *a)
    check_r0_write(a->d);
    tcg_gen_andi_tl(cpu_R[a->d], cpu_R[a->a], a->k);
static bool trans_l_ori(DisasContext *dc, arg_rrk *a)
    check_r0_write(a->d);
    tcg_gen_ori_tl(cpu_R[a->d], cpu_R[a->a], a->k);
static bool trans_l_xori(DisasContext *dc, arg_rri *a)
    check_r0_write(a->d);
    tcg_gen_xori_tl(cpu_R[a->d], cpu_R[a->a], a->i);
static bool trans_l_mfspr(DisasContext *dc, arg_l_mfspr *a)
    check_r0_write(a->d);
        gen_illegal_exception(dc);
        TCGv spr = tcg_temp_new();
        tcg_gen_ori_tl(spr, cpu_R[a->a], a->k);
        gen_helper_mfspr(cpu_R[a->d], cpu_env, cpu_R[a->d], spr);
static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a)
        gen_illegal_exception(dc);
        /* For SR, we will need to exit the TB to recognize the new
         * exception state. For NPC, in theory this counts as a branch
         * (although the SPR only exists for use by an ICE). Save all
         * of the cpu state first, allowing it to be overwritten. */
        if (dc->delayed_branch) {
            tcg_gen_mov_tl(cpu_pc, jmp_pc);
            tcg_gen_discard_tl(jmp_pc);
            tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4);
        dc->base.is_jmp = DISAS_EXIT;
        spr = tcg_temp_new();
        tcg_gen_ori_tl(spr, cpu_R[a->a], a->k);
        gen_helper_mtspr(cpu_env, spr, cpu_R[a->b]);
static bool trans_l_mac(DisasContext *dc, arg_ab *a)
    gen_mac(dc, cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_msb(DisasContext *dc, arg_ab *a)
    gen_msb(dc, cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_macu(DisasContext *dc, arg_ab *a)
    gen_macu(dc, cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_msbu(DisasContext *dc, arg_ab *a)
    gen_msbu(dc, cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_slli(DisasContext *dc, arg_dal *a)
    check_r0_write(a->d);
    tcg_gen_shli_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
static bool trans_l_srli(DisasContext *dc, arg_dal *a)
    check_r0_write(a->d);
    tcg_gen_shri_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
static bool trans_l_srai(DisasContext *dc, arg_dal *a)
    check_r0_write(a->d);
    tcg_gen_sari_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
static bool trans_l_rori(DisasContext *dc, arg_dal *a)
    check_r0_write(a->d);
    tcg_gen_rotri_tl(cpu_R[a->d], cpu_R[a->a], a->l & (TARGET_LONG_BITS - 1));
static bool trans_l_movhi(DisasContext *dc, arg_l_movhi *a)
    check_r0_write(a->d);
    tcg_gen_movi_tl(cpu_R[a->d], a->k << 16);
static bool trans_l_macrc(DisasContext *dc, arg_l_macrc *a)
    check_r0_write(a->d);
    tcg_gen_trunc_i64_tl(cpu_R[a->d], cpu_mac);
    tcg_gen_movi_i64(cpu_mac, 0);
static bool trans_l_sfeq(DisasContext *dc, arg_ab *a)
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_sfne(DisasContext *dc, arg_ab *a)
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_sfgtu(DisasContext *dc, arg_ab *a)
    tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_sfgeu(DisasContext *dc, arg_ab *a)
    tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_sfltu(DisasContext *dc, arg_ab *a)
    tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_sfleu(DisasContext *dc, arg_ab *a)
    tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_sfgts(DisasContext *dc, arg_ab *a)
    tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_sfges(DisasContext *dc, arg_ab *a)
    tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_sflts(DisasContext *dc, arg_ab *a)
    tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_sfles(DisasContext *dc, arg_ab *a)
    tcg_gen_setcond_tl(TCG_COND_LE, cpu_sr_f, cpu_R[a->a], cpu_R[a->b]);
static bool trans_l_sfeqi(DisasContext *dc, arg_ai *a)
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[a->a], a->i);
static bool trans_l_sfnei(DisasContext *dc, arg_ai *a)
    tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R[a->a], a->i);
static bool trans_l_sfgtui(DisasContext *dc, arg_ai *a)
    tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[a->a], a->i);
static bool trans_l_sfgeui(DisasContext *dc, arg_ai *a)
    tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[a->a], a->i);
static bool trans_l_sfltui(DisasContext *dc, arg_ai *a)
    tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[a->a], a->i);
static bool trans_l_sfleui(DisasContext *dc, arg_ai *a)
    tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[a->a], a->i);
static bool trans_l_sfgtsi(DisasContext *dc, arg_ai *a)
    tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R[a->a], a->i);
static bool trans_l_sfgesi(DisasContext *dc, arg_ai *a)
    tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R[a->a], a->i);
static bool trans_l_sfltsi(DisasContext *dc, arg_ai *a)
    tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R[a->a], a->i);
static bool trans_l_sflesi(DisasContext *dc, arg_ai *a)
    tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R[a->a], a->i);
static bool trans_l_sys(DisasContext *dc, arg_l_sys *a)
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_SYSCALL);
    dc->base.is_jmp = DISAS_NORETURN;
static bool trans_l_trap(DisasContext *dc, arg_l_trap *a)
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_TRAP);
    dc->base.is_jmp = DISAS_NORETURN;
static bool trans_l_msync(DisasContext *dc, arg_l_msync *a)
    tcg_gen_mb(TCG_MO_ALL);
static bool trans_l_psync(DisasContext *dc, arg_l_psync *a)
static bool trans_l_csync(DisasContext *dc, arg_l_csync *a)
static bool trans_l_rfe(DisasContext *dc, arg_l_rfe *a)
        gen_illegal_exception(dc);
        gen_helper_rfe(cpu_env);
        dc->base.is_jmp = DISAS_EXIT;
static void do_fp2(DisasContext *dc, arg_da *a,
                   void (*fn)(TCGv, TCGv_env, TCGv))
    check_r0_write(a->d);
    fn(cpu_R[a->d], cpu_env, cpu_R[a->a]);
    gen_helper_update_fpcsr(cpu_env);
static void do_fp3(DisasContext *dc, arg_dab *a,
                   void (*fn)(TCGv, TCGv_env, TCGv, TCGv))
    check_r0_write(a->d);
    fn(cpu_R[a->d], cpu_env, cpu_R[a->a], cpu_R[a->b]);
    gen_helper_update_fpcsr(cpu_env);
static void do_fpcmp(DisasContext *dc, arg_ab *a,
                     void (*fn)(TCGv, TCGv_env, TCGv, TCGv),
                     bool inv, bool swap)
        fn(cpu_sr_f, cpu_env, cpu_R[a->b], cpu_R[a->a]);
        fn(cpu_sr_f, cpu_env, cpu_R[a->a], cpu_R[a->b]);
        tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
    gen_helper_update_fpcsr(cpu_env);
static bool trans_lf_add_s(DisasContext *dc, arg_dab *a)
    do_fp3(dc, a, gen_helper_float_add_s);
static bool trans_lf_sub_s(DisasContext *dc, arg_dab *a)
    do_fp3(dc, a, gen_helper_float_sub_s);
static bool trans_lf_mul_s(DisasContext *dc, arg_dab *a)
    do_fp3(dc, a, gen_helper_float_mul_s);
static bool trans_lf_div_s(DisasContext *dc, arg_dab *a)
    do_fp3(dc, a, gen_helper_float_div_s);
static bool trans_lf_rem_s(DisasContext *dc, arg_dab *a)
    do_fp3(dc, a, gen_helper_float_rem_s);
static bool trans_lf_itof_s(DisasContext *dc, arg_da *a)
    do_fp2(dc, a, gen_helper_itofs);
static bool trans_lf_ftoi_s(DisasContext *dc, arg_da *a)
    do_fp2(dc, a, gen_helper_ftois);
static bool trans_lf_madd_s(DisasContext *dc, arg_dab *a)
    check_r0_write(a->d);
    gen_helper_float_madd_s(cpu_R[a->d], cpu_env, cpu_R[a->d],
                            cpu_R[a->a], cpu_R[a->b]);
    gen_helper_update_fpcsr(cpu_env);
static bool trans_lf_sfeq_s(DisasContext *dc, arg_ab *a)
    do_fpcmp(dc, a, gen_helper_float_eq_s, false, false);
static bool trans_lf_sfne_s(DisasContext *dc, arg_ab *a)
    do_fpcmp(dc, a, gen_helper_float_eq_s, true, false);
static bool trans_lf_sfgt_s(DisasContext *dc, arg_ab *a)
    do_fpcmp(dc, a, gen_helper_float_lt_s, false, true);
static bool trans_lf_sfge_s(DisasContext *dc, arg_ab *a)
    do_fpcmp(dc, a, gen_helper_float_le_s, false, true);
static bool trans_lf_sflt_s(DisasContext *dc, arg_ab *a)
    do_fpcmp(dc, a, gen_helper_float_lt_s, false, false);
static bool trans_lf_sfle_s(DisasContext *dc, arg_ab *a)
    do_fpcmp(dc, a, gen_helper_float_le_s, false, false);
static void openrisc_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUOpenRISCState *env = cs->env_ptr;
    dc->mem_idx = cpu_mmu_index(env, false);
    dc->tb_flags = dc->base.tb->flags;
    dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
    dc->jmp_pc_imm = -1;
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
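    /*
     * The bound computed above keeps the TB within one guest page:
     * -(pc_first | TARGET_PAGE_MASK) is the number of bytes from pc_first
     * to the end of its page, so dividing by 4 caps the instruction count.
     */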
static void openrisc_tr_tb_start(DisasContextBase *db, CPUState *cs)
    DisasContext *dc = container_of(db, DisasContext, base);
    /* Allow the TCG optimizer to see that R0 == 0 when it's true,
       which is the common case. */
    if (dc->tb_flags & TB_FLAGS_R0_0) {
        cpu_R[0] = tcg_const_tl(0);
static void openrisc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    tcg_gen_insn_start(dc->base.pc_next, (dc->delayed_branch ? 1 : 0)
                       | (dc->base.num_insns > 1 ? 2 : 0));
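    /*
     * Two bits ride along with each insn_start: bit 0 is the delay-slot
     * flag, bit 1 is set for every instruction after the first of the TB.
     * restore_state_to_opc() decodes the same layout to recover dflag and
     * ppc after an exception.
     */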
static bool openrisc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                         const CPUBreakpoint *bp)
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_DEBUG);
    dc->base.is_jmp = DISAS_NORETURN;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing. */
    dc->base.pc_next += 4;
static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    uint32_t insn = cpu_ldl_code(&cpu->env, dc->base.pc_next);
    if (!decode(dc, insn)) {
        gen_illegal_exception(dc);
    dc->base.pc_next += 4;
    /* When exiting the delay slot normally, exit via jmp_pc.
     * For DISAS_NORETURN, we have raised an exception and already exited.
     * For DISAS_EXIT, we found l.rfe in a delay slot. There's nothing
     * in the manual saying this is illegal, but surely it should be.
     * At least or1ksim overrides pcnext and ignores the branch. */
    if (dc->delayed_branch
        && --dc->delayed_branch == 0
        && dc->base.is_jmp == DISAS_NEXT) {
        dc->base.is_jmp = DISAS_JUMP;
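    /*
     * A branch only sets delayed_branch = 2; the counter reaches zero while
     * translating the delay-slot instruction, at which point the TB ends
     * with DISAS_JUMP and tb_stop moves the pc to jmp_pc (or the direct
     * jmp_pc_imm).
     */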
static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong jmp_dest;
    /* If we have already exited the TB, nothing following has effect. */
    if (dc->base.is_jmp == DISAS_NORETURN) {
    /* Adjust the delayed branch state for the next TB. */
    if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
        tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
    /* For DISAS_TOO_MANY, jump to the next insn. */
    jmp_dest = dc->base.pc_next;
    tcg_gen_movi_tl(cpu_ppc, jmp_dest - 4);
    switch (dc->base.is_jmp) {
        jmp_dest = dc->jmp_pc_imm;
        if (jmp_dest == -1) {
            /* The jump destination is indirect/computed; use jmp_pc. */
            tcg_gen_mov_tl(cpu_pc, jmp_pc);
            tcg_gen_discard_tl(jmp_pc);
            if (unlikely(dc->base.singlestep_enabled)) {
                gen_exception(dc, EXCP_DEBUG);
                tcg_gen_lookup_and_goto_ptr();
        /* The jump destination is direct; use jmp_pc_imm.
           However, we will have stored into jmp_pc as well;
           we know now that it wasn't needed. */
        tcg_gen_discard_tl(jmp_pc);
    case DISAS_TOO_MANY:
        if (unlikely(dc->base.singlestep_enabled)) {
            tcg_gen_movi_tl(cpu_pc, jmp_dest);
            gen_exception(dc, EXCP_DEBUG);
        } else if ((dc->base.pc_first ^ jmp_dest) & TARGET_PAGE_MASK) {
            tcg_gen_movi_tl(cpu_pc, jmp_dest);
            tcg_gen_lookup_and_goto_ptr();
            tcg_gen_movi_tl(cpu_pc, jmp_dest);
            tcg_gen_exit_tb(dc->base.tb, 0);
        if (unlikely(dc->base.singlestep_enabled)) {
            gen_exception(dc, EXCP_DEBUG);
            tcg_gen_exit_tb(NULL, 0);
        g_assert_not_reached();
static void openrisc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
    DisasContext *s = container_of(dcbase, DisasContext, base);
    qemu_log("IN: %s\n", lookup_symbol(s->base.pc_first));
    log_target_disas(cs, s->base.pc_first, s->base.tb->size);
static const TranslatorOps openrisc_tr_ops = {
    .init_disas_context = openrisc_tr_init_disas_context,
    .tb_start = openrisc_tr_tb_start,
    .insn_start = openrisc_tr_insn_start,
    .breakpoint_check = openrisc_tr_breakpoint_check,
    .translate_insn = openrisc_tr_translate_insn,
    .tb_stop = openrisc_tr_tb_stop,
    .disas_log = openrisc_tr_disas_log,
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
    translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb, max_insns);
void openrisc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    CPUOpenRISCState *env = &cpu->env;
    qemu_fprintf(f, "PC=%08x\n", env->pc);
    for (i = 0; i < 32; ++i) {
        qemu_fprintf(f, "R%02d=%08x%c", i, cpu_get_gpr(env, i),
                     (i % 4) == 3 ? '\n' : ' ');
void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
    env->dflag = data[1] & 1;
        env->ppc = env->pc - 4;