/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"

#include "trace-tcg.h"
#if DISAS_MB && !SIM_COMPAT
# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DIS(...) do { } while (0)
#endif
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
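/*
 * For example, decode() below uses EXTRACT_FIELD(ir, 26, 31) to pull out
 * the 6-bit opcode and EXTRACT_FIELD(ir, 21, 25) for the rd field.
 */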
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_btaken;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
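/*
 * cpu_res_addr and cpu_res_val hold the reservation set up by lwx;
 * dec_store() compares against them to emulate swx (see below).
 */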
static TCGv     cpu_res_addr;
static TCGv_i32 cpu_res_val;

#include "exec/gen-icount.h"
/* This is the state at translation time.  */
typedef struct DisasContext {
    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;

#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3

    int abort_at_next_insn;
    struct TranslationBlock *tb;
    int singlestep_enabled;
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};
static inline void t_sync_flags(DisasContext *dc)
    /* Sync the tb-dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
static void gen_raise_exception(DisasContext *dc, uint32_t index)
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;

static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
    tcg_gen_movi_i32(cpu_pc, dc->pc);
    gen_raise_exception(dc, index);

static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
    TCGv_i32 tmp = tcg_const_i32(esr_ec);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
    tcg_temp_free_i32(tmp);

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
    if (use_goto_tb(dc, dest)) {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->tb, n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(NULL, 0);
static void read_carry(DisasContext *dc, TCGv_i32 d)
    tcg_gen_shri_i32(d, cpu_msr, 31);
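    /*
     * The carry is read back from the MSR_CC alias in bit 31, which
     * write_carry() below keeps in sync with MSR_C (bit 2).
     */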
/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv_i32 v)
    /* Deposit bit 0 into MSR_C and the alias MSR_CC.  */
    tcg_gen_deposit_i32(cpu_msr, cpu_msr, v, 2, 1);
    tcg_gen_deposit_i32(cpu_msr, cpu_msr, v, 31, 1);
static void write_carryi(DisasContext *dc, bool carry)
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, carry);
    tcg_temp_free_i32(t0);
/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
    if (cond && (dc->tb_flags & MSR_EE_FLAG)
        && dc->cpu->cfg.illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    bool cond_user = cond && mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
    /* Immediate insn without the imm prefix?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
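/*
 * dec_alu_op_b() selects ALU operand b: for type-B insns the 16-bit
 * immediate is sign-extended into cpu_imm, or OR-ed into a pending imm
 * prefix; otherwise register rb is used directly.
 */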
static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_i32(cpu_imm, cpu_imm, dc->imm);
        else
            tcg_gen_movi_i32(cpu_imm, (int32_t)((int16_t)dc->imm));

    return &cpu_R[dc->rb];
static void dec_add(DisasContext *dc)
    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            /* c - Add carry into the result.  */
            cf = tcg_temp_new_i32();
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
            tcg_temp_free_i32(cf);

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    cf = tcg_temp_new_i32();
        tcg_gen_movi_i32(cf, 0);

        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);

        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);

    tcg_temp_free_i32(cf);
static void dec_sub(DisasContext *dc)
    unsigned int u, cmp, k, c;

    cmp = (dc->imm & 1) && (!dc->type_b) && k;

        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);

            gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            /* c - Add carry into the result.  */
            cf = tcg_temp_new_i32();
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
            tcg_temp_free_i32(cf);

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new_i32();
    na = tcg_temp_new_i32();
        tcg_gen_movi_i32(cf, 1);

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_i32(na, cpu_R[dc->ra]);

        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);

        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);

    tcg_temp_free_i32(cf);
    tcg_temp_free_i32(na);
static void dec_pattern(DisasContext *dc)
    if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {

    mode = dc->opcode & 3;

        LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);

        LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
                                cpu_R[dc->ra], cpu_R[dc->rb]);

        LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
                                cpu_R[dc->ra], cpu_R[dc->rb]);

        cpu_abort(CPU(dc->cpu),
                  "unsupported pattern insn opcode=%x\n", dc->opcode);
static void dec_and(DisasContext *dc)
    if (!dc->type_b && (dc->imm & (1 << 10))) {

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

        tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

static void dec_or(DisasContext *dc)
    if (!dc->type_b && (dc->imm & (1 << 10))) {

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
        tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

static void dec_xor(DisasContext *dc)
    if (!dc->type_b && (dc->imm & (1 << 10))) {

    LOG_DIS("xor r%d\n", dc->rd);
        tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
static inline void msr_read(DisasContext *dc, TCGv_i32 d)
    tcg_gen_mov_i32(d, cpu_msr);

static inline void msr_write(DisasContext *dc, TCGv_i32 v)
    dc->cpustate_changed = 1;
    /* PVR bit is not writable, and is never set.  */
    tcg_gen_andi_i32(cpu_msr, v, ~MSR_PVR);
static void dec_msr(DisasContext *dc)
    CPUState *cs = CPU(dc->cpu);
    bool to, clrset, extended = false;

    sr = extract32(dc->imm, 0, 14);
    to = extract32(dc->imm, 14, 1);
    clrset = extract32(dc->imm, 15, 1) == 0;
        dc->cpustate_changed = 1;

    /* Extended MSRs are only available if addr_size > 32.  */
    if (dc->cpu->cfg.addr_size > 32) {
        /* The E-bit is encoded differently for To/From MSR.  */
        static const unsigned int e_bit[] = { 19, 24 };

        extended = extract32(dc->imm, e_bit[to], 1);

    /* msrclr and msrset.  */
        bool clr = extract32(dc->ir, 16, 1);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",

        if (!dc->cpu->cfg.use_msr_instr) {

        if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {

            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

            tcg_gen_not_i32(t1, t1);
            tcg_gen_and_i32(t0, t0, t1);
            tcg_gen_or_i32(t0, t0, t1);

        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_gen_movi_i32(cpu_pc, dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
    if (trap_userspace(dc, to)) {

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        TCGv_i32 tmp_ext = tcg_const_i32(extended);

        tmp_sr = tcg_const_i32(sr);
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
        tcg_temp_free_i32(tmp_sr);
        tcg_temp_free_i32(tmp_ext);

        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);

            msr_write(dc, cpu_R[dc->ra]);

            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_extu_i32_i64(t64, cpu_R[dc->ra]);
            tcg_gen_st_i64(t64, cpu_env, offsetof(CPUMBState, ear));
            tcg_temp_free_i64(t64);

            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, esr));
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, fsr));
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, btr));
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, edr));
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, slr));
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, shr));

            cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

            tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
            msr_read(dc, cpu_R[dc->rd]);

            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(cpu_R[dc->rd], t64);
                tcg_gen_extrl_i64_i32(cpu_R[dc->rd], t64);
            tcg_temp_free_i64(t64);

            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, esr));
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, fsr));
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, btr));
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, edr));
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, slr));
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, shr));
        case 0x2000 ... 0x200c:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, pvr.regs[rn]));

            cpu_abort(cs, "unknown mfs reg %x\n", sr);

        tcg_gen_movi_i32(cpu_R[0], 0);
/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
    unsigned int subcode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {

    subcode = dc->imm & 3;

        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {

    tmp = tcg_temp_new_i32();
        LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);

        LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
                          cpu_R[dc->ra], cpu_R[dc->rb]);

        LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
                           cpu_R[dc->ra], cpu_R[dc->rb]);

        LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);

        cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);

    tcg_temp_free_i32(tmp);
static void dec_div(DisasContext *dc)
    if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {

        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
        tcg_gen_movi_i32(cpu_R[dc->rd], 0);
static void dec_barrel(DisasContext *dc)
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {

        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);

        int width = imm_w - imm_s + 1;
            /* These inputs have undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],

        t0 = tcg_temp_new_i32();

        tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_i32(t0, t0, 31);
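        /* Only the low 5 bits of operand b are used as the shift amount. */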
            tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);

        tcg_temp_free_i32(t0);
static void dec_bit(DisasContext *dc)
    CPUState *cs = CPU(dc->cpu);

    op = dc->ir & ((1 << 9) - 1);

        t0 = tcg_temp_new_i32();

        LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_andi_i32(t0, cpu_msr, MSR_CC);
        write_carry(dc, cpu_R[dc->ra]);
            tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
        tcg_temp_free_i32(t0);

        LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

        /* Update carry.  Note that write_carry only looks at the LSB.  */
        write_carry(dc, cpu_R[dc->ra]);
            tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);

        LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);

        LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);

        LOG_DIS("wdc r%d\n", dc->ra);
        trap_userspace(dc, true);

        LOG_DIS("wic r%d\n", dc->ra);
        trap_userspace(dc, true);

        if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
        if (dc->cpu->cfg.use_pcmp_instr) {
            tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);

        LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);

        LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);

        cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                  dc->pc, op, dc->rd, dc->ra, dc->rb);
static inline void sync_jmpstate(DisasContext *dc)
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_i32(cpu_btaken, 1);

        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
static void dec_imm(DisasContext *dc)
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_i32(cpu_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
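    /*
     * The imm prefix loads the high half of a 32-bit constant, and the
     * following type-B insn ORs in its own 16-bit immediate (see
     * dec_alu_op_b).  For example, "imm 0x1234" followed by
     * "addi r3, r0, 0x5678" ends up using the operand 0x12345678.
     */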
static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
    bool extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to true if r1 is used by loadstores.  */
    bool stackprot = false;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {

    /* Treat the common cases first.  */
        int addr_size = dc->cpu->cfg.addr_size;

        if (addr_size == 32) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);

        tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));

        /* If any of the regs is r0, set t to the value of the other reg.  */
            tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
        } else if (dc->rb == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {

        t32 = tcg_temp_new_i32();
        tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
        tcg_gen_extu_i32_tl(t, t32);
        tcg_temp_free_i32(t32);

            gen_helper_stackprot(cpu_env, t);

    t32 = tcg_temp_new_i32();
        tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
        tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    tcg_gen_extu_i32_tl(t, t32);
    tcg_temp_free_i32(t32);

        gen_helper_stackprot(cpu_env, t);
static void dec_load(DisasContext *dc)
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    mop = dc->opcode & 3;
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);

    if (trap_illegal(dc, size > 4)) {

    if (trap_userspace(dc, ea)) {

    LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",

    addr = tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
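    /*
     * For example, a reversed byte access at address 0 is redirected to
     * byte address 3 (addr ^ 3), and a reversed halfword access at 0 to
     * address 2 (addr ^ 2), as done below.
     */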
    if (rev && size != 4) {
        /* Endian reverse the address.  t is addr.  */
            tcg_gen_xori_tl(addr, addr, 3);
            tcg_gen_xori_tl(addr, addr, 2);
            cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");

        /* lwx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(addr, addr, ~3);

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses.  That's why we speculatively do the load
     * into v.  If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);

    if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
        TCGv_i32 t0 = tcg_const_i32(0);
        TCGv_i32 treg = tcg_const_i32(dc->rd);
        TCGv_i32 tsize = tcg_const_i32(size - 1);

        tcg_gen_movi_i32(cpu_pc, dc->pc);
        gen_helper_memalign(cpu_env, addr, treg, t0, tsize);

        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(treg);
        tcg_temp_free_i32(tsize);

        tcg_gen_mov_tl(cpu_res_addr, addr);
        tcg_gen_mov_i32(cpu_res_val, v);
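        /* lwx: record the reservation (address and loaded value) for swx. */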
        tcg_gen_mov_i32(cpu_R[dc->rd], v);

    tcg_temp_free_i32(v);

        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);

    tcg_temp_free(addr);
static void dec_store(DisasContext *dc)
    TCGLabel *swx_skip = NULL;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    mop = dc->opcode & 3;
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);

    if (trap_illegal(dc, size > 4)) {

    trap_userspace(dc, ea);

    LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */

    /* SWX needs a temp_local.  */
    addr = ex ? tcg_temp_local_new() : tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(addr, addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_skip);

        /*
         * Compare the value loaded at lwx with current contents of
         * the reserved location.
         */
        tval = tcg_temp_new_i32();

        tcg_gen_atomic_cmpxchg_i32(tval, addr, cpu_res_val,
                                   cpu_R[dc->rd], mem_index,

        tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free_i32(tval);
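        /*
         * swx: the cmpxchg above only performs the store when memory still
         * holds the value observed by lwx; on success C is cleared, while
         * the branches above leave C set to flag a failed store-conditional.
         */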
    if (rev && size != 4) {
        /* Endian reverse the address.  t is addr.  */
            tcg_gen_xori_tl(addr, addr, 3);

            /* Force addr into the temp.  */
            tcg_gen_xori_tl(addr, addr, 2);
            cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");

    tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);

    /* Verify alignment if needed.  */
    if (dc->cpu->cfg.unaligned_exceptions && size > 1) {
        TCGv_i32 t1 = tcg_const_i32(1);
        TCGv_i32 treg = tcg_const_i32(dc->rd);
        TCGv_i32 tsize = tcg_const_i32(size - 1);

        tcg_gen_movi_i32(cpu_pc, dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory.  One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, addr, treg, t1, tsize);

        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(treg);
        tcg_temp_free_i32(tsize);

        gen_set_label(swx_skip);

    tcg_temp_free(addr);
static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv_i32 d, TCGv_i32 a)
    static const int mb_to_tcg_cc[] = {
        [CC_EQ] = TCG_COND_EQ,
        [CC_NE] = TCG_COND_NE,
        [CC_LT] = TCG_COND_LT,
        [CC_LE] = TCG_COND_LE,
        [CC_GE] = TCG_COND_GE,
        [CC_GT] = TCG_COND_GT,

        tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);

        cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
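/*
 * eval_cond_jmp() writes pc_true into the pc when the branch was taken
 * (cpu_btaken != 0) and pc_false otherwise.
 */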
static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
    TCGv_i32 zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, cpu_pc,

    tcg_temp_free_i32(zero);
static void dec_setup_dslot(DisasContext *dc)
    TCGv_i32 tmp = tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG));

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;

    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, bimm));
    tcg_temp_free_i32(tmp);
static void dec_bcc(DisasContext *dc)
    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
        dec_setup_dslot(dc);

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_i32(cpu_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
        dc->jmp = JMP_INDIRECT;
        tcg_gen_addi_i32(cpu_btarget, *dec_alu_op_b(dc), dc->pc);
    eval_cc(dc, cc, cpu_btaken, cpu_R[dc->ra]);
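    /*
     * cpu_btaken now holds the evaluated condition; the branch itself is
     * resolved once the delay slot has been translated (see the
     * delayed_branch handling in gen_intermediate_code).
     */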
static void dec_br(DisasContext *dc)
    unsigned int dslot, link, abs, mbar;

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        uint16_t mbar_imm = dc->rd;

        LOG_DIS("mbar %d\n", mbar_imm);

        /* Data access memory barrier.  */
        if ((mbar_imm & 2) == 0) {
            tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

        /* mbar IMM & 16 decodes to sleep.  */
        if (mbar_imm & 16) {

            if (trap_userspace(dc, true)) {
                /* Sleep is a privileged instruction.  */

            tmp_1 = tcg_const_i32(1);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_temp_free_i32(tmp_1);

            tcg_gen_movi_i32(cpu_pc, dc->pc + 4);

            gen_raise_exception(dc, EXCP_HLT);

        dc->cpustate_changed = 1;

    LOG_DIS("br%s%s%s%s imm=%x\n",
            abs ? "a" : "", link ? "l" : "",
            dc->type_b ? "i" : "", dslot ? "d" : "",

    dc->delayed_branch = 1;
        dec_setup_dslot(dc);

        tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i32(cpu_btaken, 1);
        tcg_gen_mov_i32(cpu_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) &&
                (dc->imm == 8 || dc->imm == 0x18)) {
                gen_raise_exception_sync(dc, EXCP_BREAK);

            if (trap_userspace(dc, true)) {
                gen_raise_exception_sync(dc, EXCP_DEBUG);

        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);

            tcg_gen_movi_i32(cpu_btaken, 1);
            tcg_gen_addi_i32(cpu_btarget, *dec_alu_op_b(dc), dc->pc);
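/*
 * The rtid/rtbd/rted helpers below restore the pre-exception mode: the
 * saved UMS/VMS bits sit one position above UM/VM in the MSR, so shifting
 * the MSR right by one and masking with MSR_VM | MSR_UM recovers them.
 */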
static inline void do_rti(DisasContext *dc)
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_mov_i32(t1, cpu_msr);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_ori_i32(t1, t1, MSR_IE);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);

    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTI_FLAG;

static inline void do_rtb(DisasContext *dc)
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_mov_i32(t1, cpu_msr);
    tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);

    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTB_FLAG;

static inline void do_rte(DisasContext *dc)
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_mov_i32(t1, cpu_msr);
    tcg_gen_ori_i32(t1, t1, MSR_EE);
    tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);

    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTE_FLAG;
static void dec_rts(DisasContext *dc)
    unsigned int b_bit, i_bit, e_bit;

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    if (trap_userspace(dc, i_bit || b_bit || e_bit)) {

    dec_setup_dslot(dc);

        LOG_DIS("rtid ir=%x\n", dc->ir);
        dc->tb_flags |= DRTI_FLAG;
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        dc->tb_flags |= DRTB_FLAG;
        LOG_DIS("rted ir=%x\n", dc->ir);
        dc->tb_flags |= DRTE_FLAG;
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_i32(cpu_btaken, 1);
    tcg_gen_add_i32(cpu_btarget, cpu_R[dc->ra], *dec_alu_op_b(dc));
static int dec_check_fpuv2(DisasContext *dc)
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        gen_raise_hw_excp(dc, ESR_EC_FPU);
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
static void dec_fpu(DisasContext *dc)
    unsigned int fpu_insn;

    if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {

    fpu_insn = (dc->ir >> 7) & 7;

        gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],

        gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],

        gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],

        gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],

        switch ((dc->ir >> 4) & 7) {
            gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);

            qemu_log_mask(LOG_UNIMP,
                          "unimplemented fcmp fpu_insn=%x pc=%x"
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;

        if (!dec_check_fpuv2(dc)) {
        gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);

        if (!dec_check_fpuv2(dc)) {
        gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);

        if (!dec_check_fpuv2(dc)) {
        gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);

        qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                      fpu_insn, dc->pc, dc->opcode);
        dc->abort_at_next_insn = 1;
static void dec_null(DisasContext *dc)
    if (trap_illegal(dc, true)) {

    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
    TCGv_i32 t_id, t_ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if (trap_userspace(dc, true)) {

    t_id = tcg_temp_new_i32();
        tcg_gen_movi_i32(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
        tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;

    t_ctrl = tcg_const_i32(ctrl);

        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);

    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
static struct decoder_info {
    void (*dec)(DisasContext *dc);
    {DEC_BARREL, dec_barrel},
    {DEC_ST, dec_store},
    {DEC_STREAM, dec_stream},
static inline void decode(DisasContext *dc, uint32_t ir)
    LOG_DIS("%8.8x\t", dc->ir);

    trap_illegal(dc, dc->cpu->cfg.opcode_0_illegal);
        /* Don't decode nop/zero instructions any further.  */

    /* Bit 2 (MSB-first numbering), i.e. bit 29 here, selects the
       immediate (type-B) form of the insn.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = env_archcpu(env);
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t page_start, org_flags;

    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;

        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);

    page_start = pc_start & TARGET_PAGE_MASK;

        tcg_gen_insn_start(dc->pc);

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            gen_raise_exception_sync(dc, EXCP_DEBUG);
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */

        LOG_DIS("%8.8x:\t", dc->pc);
        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {

        decode(dc, cpu_ldl_code(env, dc->pc));
            dc->tb_flags &= ~IMM_FLAG;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                if (dc->tb_flags & DRTB_FLAG)
                if (dc->tb_flags & DRTE_FLAG)
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    TCGv_i32 tmp_pc = tcg_const_i32(dc->pc);
                    eval_cond_jmp(dc, cpu_btarget, tmp_pc);
                    tcg_temp_free_i32(tmp_pc);
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();

                    /* Conditional jmp.  */
                    tcg_gen_brcondi_i32(TCG_COND_NE, cpu_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;

        if (cs->singlestep_enabled) {
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && (dc->pc - page_start < TARGET_PAGE_SIZE)
             && num_insns < max_insns);
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_i32(cpu_pc, npc);

    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i32(cpu_pc, npc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_i32(cpu_pc, npc);
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        switch (dc->is_jmp) {
            gen_goto_tb(dc, 1, npc);

            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(NULL, 0);

            /* nothing more to generate */

    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        FILE *logfile = qemu_log_lock();
        qemu_log("--------------\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock(logfile);

    assert(!dc->abort_at_next_insn);
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;

    qemu_fprintf(f, "IN: PC=%x %s\n",
                 env->pc, lookup_symbol(env->pc));
    qemu_fprintf(f, "rmsr=%x resr=%x rear=%" PRIx64 " "
                 "imm=%x iflags=%x fsr=%x rbtr=%x\n",
                 env->msr, env->esr, env->ear,
                 env->imm, env->iflags, env->fsr, env->btr);
    qemu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->btaken, env->btarget,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    for (i = 0; i < 12; i++) {
        qemu_fprintf(f, "rpvr%2.2d=%8.8x ", i, env->pvr.regs[i]);
        if ((i + 1) % 4 == 0) {
            qemu_fprintf(f, "\n");

    /* Registers that aren't modeled are reported as 0 */
    qemu_fprintf(f, "redr=%x rpid=0 rzpr=0 rtlbx=0 rtlbsx=0 "
                 "rtlblo=0 rtlbhi=0\n", env->edr);
    qemu_fprintf(f, "slr=%x shr=%x\n", env->slr, env->shr);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            qemu_fprintf(f, "\n");

    qemu_fprintf(f, "\n\n");
void mb_tcg_init(void)
    cpu_iflags = tcg_global_mem_new_i32(cpu_env,
                                        offsetof(CPUMBState, iflags),
    cpu_imm = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUMBState, imm),
    cpu_btarget = tcg_global_mem_new_i32(cpu_env,
                                         offsetof(CPUMBState, btarget),
    cpu_btaken = tcg_global_mem_new_i32(cpu_env,
                                        offsetof(CPUMBState, btaken),
    cpu_res_addr = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUMBState, res_addr),
    cpu_res_val = tcg_global_mem_new_i32(cpu_env,
                                         offsetof(CPUMBState, res_val),

    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUMBState, regs[i]),

        tcg_global_mem_new_i32(cpu_env, offsetof(CPUMBState, pc), "rpc");
        tcg_global_mem_new_i32(cpu_env, offsetof(CPUMBState, msr), "rmsr");

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,