/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"

#include "trace-tcg.h"
#include "exec/log.h"
#define SIM_COMPAT 0
#define DISAS_MB 1

#if DISAS_MB && !SIM_COMPAT
# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DIS(...) do { } while (0)
#endif
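
/*
 * Field extraction helper.  For example, decode() below uses
 * EXTRACT_FIELD(ir, 26, 31) to pull the major opcode out of bits 31:26
 * of the instruction word, and similar calls for rd, ra, rb and imm.
 */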
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
static TCGv_i32 env_debug;
static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_SR[14];
static TCGv_i32 env_imm;
static TCGv_i32 env_btaken;
static TCGv_i32 env_btarget;
static TCGv_i32 env_iflags;
static TCGv env_res_addr;
static TCGv_i32 env_res_val;
#include "exec/gen-icount.h"
/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "rear", "sr4", "resr", "sr6", "rfsr",
    "sr8", "sr9", "sr10", "rbtr", "sr12", "redr"
};
static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_i32(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
static void read_carry(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_shri_i32(d, cpu_SR[SR_MSR], 31);
}
/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_shli_i32(t0, v, 31);
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_andi_i32(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                     ~(MSR_C | MSR_CC));
    tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free_i32(t0);
}
static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free_i32(t0);
}
/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE_FLAG)
        && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond;
}
/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    bool cond_user = cond && mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond_user;
}
/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}
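
/*
 * dec_alu_op_b returns a pointer to the TCG value holding ALU operand b.
 * For type B (immediate) insns the 16-bit immediate is materialized into
 * env_imm, OR-ed into a pending imm-prefix value when IMM_FLAG is set;
 * otherwise operand b is simply register rb.
 */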
static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG) {
            tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
        } else {
            tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
        }
        return &env_imm;
    }

    return &cpu_R[dc->rb];
}
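
/*
 * Note: r0 is hardwired to zero on MicroBlaze; writes to it are discarded.
 * That is why the decoders below test dc->rd before emitting a write-back
 * while still computing MSR side effects such as carry.
 */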
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv_i32 cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 0);
    }

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
}
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv_i32 cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u) {
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            } else {
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            }
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new_i32();
    na = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_i32(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
    tcg_temp_free_i32(na);
}
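
/*
 * The pcmp* pattern-compare instructions share opcode space with the
 * logical ops; dec_and/dec_or/dec_xor divert here when bit 10 of the
 * immediate field (dc->imm & (1 << 10)) is set in register form.
 */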
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
        return;
    }

    mode = dc->opcode & 3;
    switch (mode) {
    case 0:
        /* pcmpbf.  */
        LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd) {
            gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        break;
    case 2:
        LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd) {
            tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
                                cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        break;
    case 3:
        LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        if (dc->rd) {
            tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
                                cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        break;
    default:
        cpu_abort(CPU(dc->cpu),
                  "unsupported pattern insn opcode=%x\n", dc->opcode);
        break;
    }
}
static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd) {
        return;
    }

    if (not) {
        tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else {
        tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
}
static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd) {
        tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
}
static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd) {
        tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
}
static inline void msr_read(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_mov_i32(d, cpu_SR[SR_MSR]);
}
static inline void msr_write(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i32 t;

    t = tcg_temp_new_i32();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable. */
    tcg_gen_andi_i32(t, v, ~MSR_PVR);
    tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free_i32(t);
}
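
/*
 * Bit 15 of the immediate distinguishes mts/mfs (special register moves)
 * from msrset/msrclr, and bit 14 ("to") selects the write direction.
 * sr numbers 0x800/0x802 map to the SLR/SHR stack protection bounds held
 * in CPUMBState.
 */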
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0, t1;
    unsigned int sr, to, rn;

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to) {
        dc->cpustate_changed = 1;
    }

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
            return;
        }

        if (dc->rd) {
            msr_read(dc, cpu_R[dc->rd]);
        }

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        msr_read(dc, t0);
        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_i32(t1, t1);
            tcg_gen_and_i32(t0, t0, t1);
        } else {
            tcg_gen_or_i32(t0, t0, t1);
        }
        msr_write(dc, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (trap_userspace(dc, to)) {
        return;
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to) {
            gen_helper_mmu_write(cpu_env, tcg_const_i32(sr), cpu_R[dc->ra]);
        } else {
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_i32(sr));
        }
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
        case 0:
            break;
        case 1:
            msr_write(dc, cpu_R[dc->ra]);
            break;
        case 0x3:
            tcg_gen_mov_i32(cpu_SR[SR_EAR], cpu_R[dc->ra]);
            break;
        case 0x5:
            tcg_gen_mov_i32(cpu_SR[SR_ESR], cpu_R[dc->ra]);
            break;
        case 0x7:
            tcg_gen_andi_i32(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
            break;
        case 0x800:
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, slr));
            break;
        case 0x802:
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, shr));
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
            break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
        case 0:
            tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
            break;
        case 1:
            msr_read(dc, cpu_R[dc->rd]);
            break;
        case 0x3:
            tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_EAR]);
            break;
        case 0x5:
            tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_ESR]);
            break;
        case 0x7:
            tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_FSR]);
            break;
        case 0xb:
            tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_BTR]);
            break;
        case 0x800:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, slr));
            break;
        case 0x802:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, shr));
            break;
        case 0x2000 ... 0x200c:
            rn = sr & 0xf;
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
            break;
        default:
            cpu_abort(cs, "unknown mfs reg %x\n", sr);
            break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_i32(cpu_R[0], 0);
    }
}
/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv_i32 tmp;
    unsigned int subcode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    tmp = tcg_temp_new_i32();
    switch (subcode) {
    case 0:
        LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 1:
        LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
                          cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 2:
        LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
                           cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    case 3:
        LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
        tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        break;
    default:
        cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
        break;
    }
    tcg_temp_free_i32(tmp);
}
/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
        return;
    }

    if (u) {
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    } else {
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    }
    if (!dc->rd) {
        tcg_gen_movi_i32(cpu_R[dc->rd], 0);
    }
}
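
/*
 * For the barrel shifter the immediate encodes everything: bit 15 selects
 * insert (bsifi), bit 14 extract (bsefi), bit 10 the shift direction,
 * bit 9 arithmetic vs logical, bits 10:6 the width and bits 4:0 the shift
 * amount, matching the extract32() calls below.
 */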
static void dec_barrel(DisasContext *dc)
{
    TCGv_i32 t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new_i32();

        tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_i32(t0, t0, 31);

        if (s) {
            tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free_i32(t0);
    }
}
static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0;
    unsigned int op;

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
    case 0x21:
        /* src.  */
        t0 = tcg_temp_new_i32();

        LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_andi_i32(t0, cpu_SR[SR_MSR], MSR_CC);
        write_carry(dc, cpu_R[dc->ra]);
        if (dc->rd) {
            tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
        }
        tcg_temp_free_i32(t0);
        break;

    case 0x1:
    case 0x41:
        /* srl.  */
        LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

        /* Update carry. Note that write carry only looks at the LSB.  */
        write_carry(dc, cpu_R[dc->ra]);
        if (dc->rd) {
            if (op == 0x41) {
                tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            } else {
                tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
        }
        break;
    case 0x60:
        LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x61:
        LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x64:
    case 0x66:
    case 0x74:
    case 0x76:
        /* wdc.  */
        LOG_DIS("wdc r%d\n", dc->ra);
        trap_userspace(dc, true);
        break;
    case 0x68:
        /* wic.  */
        LOG_DIS("wic r%d\n", dc->ra);
        trap_userspace(dc, true);
        break;
    case 0xe0:
        if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
            return;
        }
        if (dc->cpu->cfg.use_pcmp_instr) {
            tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
        }
        break;
    case 0x1e0:
        /* swapb */
        LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
        break;
    case 0x1e2:
        /* swaph */
        LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
        tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
        break;
    default:
        cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                  dc->pc, op, dc->rd, dc->ra, dc->rb);
        break;
    }
}
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_i32(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i32(env_btarget, dc->jmp_pc);
    }
}
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_i32(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
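
/*
 * Loads and stores address either ra + rb (register form) or
 * ra + sext(imm16) (type B form).  When the stack protection option is
 * configured, any access relative to r1 is range-checked against the
 * SLR/SHR bounds by gen_helper_stackprot().
 */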
static inline void compute_ldst_addr(DisasContext *dc, TCGv t)
{
    bool extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to true if r1 is used by loadstores.  */
    bool stackprot = false;
    TCGv_i32 t32;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = true;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, set t to the value of the other reg.  */
        if (dc->ra == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
            return;
        } else if (dc->rb == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
            return;
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = true;
        }

        t32 = tcg_temp_new_i32();
        tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
        tcg_gen_extu_i32_tl(t, t32);
        tcg_temp_free_i32(t32);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, t);
        }
        return;
    }
    /* Immediate.  */
    t32 = tcg_temp_new_i32();
    if (!extimm) {
        if (dc->imm == 0) {
            tcg_gen_mov_i32(t32, cpu_R[dc->ra]);
        } else {
            tcg_gen_movi_i32(t32, (int32_t)((int16_t)dc->imm));
            tcg_gen_add_i32(t32, cpu_R[dc->ra], t32);
        }
    } else {
        tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
    tcg_gen_extu_i32_tl(t, t32);
    tcg_temp_free_i32(t32);

    if (stackprot) {
        gen_helper_stackprot(cpu_env, t);
    }
}
static void dec_load(DisasContext *dc)
{
    TCGv_i32 v;
    TCGv addr;
    unsigned int size;
    bool rev = false, ex = false;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = tcg_temp_new();
    compute_ldst_addr(dc, addr);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
        case 1:
        {
            TCGv low = tcg_temp_new();

            tcg_gen_andi_tl(low, addr, 3);
            tcg_gen_sub_tl(low, tcg_const_tl(3), low);
            tcg_gen_andi_tl(addr, addr, ~3);
            tcg_gen_or_tl(addr, addr, low);
            tcg_temp_free(low);
            break;
        }

        case 2:
            /* 00 -> 10
               10 -> 00.  */
            tcg_gen_xori_tl(addr, addr, 2);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
            break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        tcg_gen_andi_tl(addr, addr, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(0), tcg_const_i32(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, addr);
        tcg_gen_mov_i32(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_i32(cpu_R[dc->rd], v);
    }
    tcg_temp_free_i32(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    tcg_temp_free(addr);
}
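
/*
 * lwx/swx implement a load-reserved/store-conditional pair: lwx records
 * the address and loaded value in env_res_addr/env_res_val, and swx only
 * performs the store (and clears carry) if both still match; otherwise
 * carry stays set to signal failure.
 */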
static void dec_store(DisasContext *dc)
{
    TCGv addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size;
    bool rev = false, ex = false;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    /* SWX needs a temp_local.  */
    addr = ex ? tcg_temp_local_new() : tcg_temp_new();
    compute_ldst_addr(dc, addr);

    if (ex) { /* swx */
        TCGv_i32 tval;

        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(addr, addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new_i32();
        tcg_gen_qemu_ld_i32(tval, addr, cpu_mmu_index(&dc->cpu->env, false),
                            MO_TEUL);
        tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free_i32(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
        case 1:
        {
            TCGv low = tcg_temp_new();

            tcg_gen_andi_tl(low, addr, 3);
            tcg_gen_sub_tl(low, tcg_const_tl(3), low);
            tcg_gen_andi_tl(addr, addr, ~3);
            tcg_gen_or_tl(addr, addr, low);
            tcg_temp_free(low);
            break;
        }

        case 2:
            /* Force addr into the temp.  */
            tcg_gen_xori_tl(addr, addr, 2);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
            break;
        }
    }
    tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr,
                        cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         * in memory. One possible way to achieve this is to probe
         * the MMU prior to the memaccess, that way we could put
         * the alignment checks in between the probe and the mem
         * access.
         */
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(1), tcg_const_i32(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }

    tcg_temp_free(addr);
}
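
/*
 * eval_cc maps the MicroBlaze branch condition field onto a TCG setcond.
 * dec_bcc always passes b = 0, so env_btaken effectively becomes
 * "rA <cond> 0".
 */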
static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    switch (cc) {
    case CC_EQ:
        tcg_gen_setcond_i32(TCG_COND_EQ, d, a, b);
        break;
    case CC_NE:
        tcg_gen_setcond_i32(TCG_COND_NE, d, a, b);
        break;
    case CC_LT:
        tcg_gen_setcond_i32(TCG_COND_LT, d, a, b);
        break;
    case CC_LE:
        tcg_gen_setcond_i32(TCG_COND_LE, d, a, b);
        break;
    case CC_GE:
        tcg_gen_setcond_i32(TCG_COND_GE, d, a, b);
        break;
    case CC_GT:
        tcg_gen_setcond_i32(TCG_COND_GT, d, a, b);
        break;
    default:
        cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
        break;
    }
}
static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_i32(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_i32(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_i32(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                       cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_i32(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i32(env_btarget, dc->pc);
        tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_i32(0));
}
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
            abs ? "a" : "", link ? "l" : "",
            dc->type_b ? "i" : "", dslot ? "d" : "",
            dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                       cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd) {
        tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
    }

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_i32(env_btaken, 1);
        tcg_gen_mov_i32(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if (trap_userspace(dc, true)) {
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_i32(env_btaken, 1);
            tcg_gen_movi_i32(env_btarget, dc->pc);
            tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}
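
/*
 * do_rti/do_rtb/do_rte undo the exception entry that shifted UM/VM into
 * the UMS/VMS save bits: MSR is shifted right by one to move the saved
 * copies back into place, while IE is re-enabled (rti), BIP cleared (rtb)
 * or EE set and EIP cleared (rte).
 */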
static inline void do_rti(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_shri_i32(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}
static inline void do_rtb(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}
static inline void do_rte(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
        return;
    }

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                   cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        dc->tb_flags |= DRTE_FLAG;
    } else {
        LOG_DIS("rts ir=%x\n", dc->ir);
    }

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_i32(env_btaken, 1);
    tcg_gen_add_i32(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
}
static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
    case 0:
        gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;
    case 1:
        gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                         cpu_R[dc->rb]);
        break;
    case 2:
        gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;
    case 3:
        gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                        cpu_R[dc->rb]);
        break;
    case 4:
        switch ((dc->ir >> 4) & 7) {
        case 0:
            gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 4:
            gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 5:
            gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 6:
            gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            qemu_log_mask(LOG_UNIMP,
                          "unimplemented fcmp fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
        }
        break;
    case 5:
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;
    case 6:
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;
    case 7:
        if (!dec_check_fpuv2(dc)) {
            return;
        }
        gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                      " opc=%x\n",
                      fpu_insn, dc->pc, dc->opcode);
        dc->abort_at_next_insn = 1;
        break;
    }
}
static void dec_null(DisasContext *dc)
{
    if (trap_illegal(dc, true)) {
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}
/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if (trap_userspace(dc, true)) {
        return;
    }

    t_id = tcg_temp_new_i32();
    if (dc->type_b) {
        tcg_gen_movi_i32(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_i32(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
}
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
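
/*
 * decinfo is scanned linearly; each DEC_* macro from microblaze-decode.h
 * expands to a bits/mask pair, and the first entry whose mask-selected
 * opcode bits match dispatches the insn.  The {0, 0} terminator makes
 * dec_null catch everything undecoded.
 */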
static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir) {
        dc->nr_nops = 0;
    } else {
        trap_illegal(dc, dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK);

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t page_start, org_flags;
    uint32_t npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm) {
            dc->tb_flags &= ~IMM_FLAG;
        }
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG) {
                    do_rti(dc);
                }
                if (dc->tb_flags & DRTB_FLAG) {
                    do_rtb(dc);
                }
                if (dc->tb_flags & DRTE_FLAG) {
                    do_rte(dc);
                }
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_i32(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc - page_start < TARGET_PAGE_SIZE)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else {
            npc = dc->jmp_pc;
        }
    }

    if (tb_cflags(tb) & CF_LAST_IO) {
        gen_io_end();
    }
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, npc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_EIP),
                (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}
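
/*
 * mb_tcg_init registers every TCG global used above, each backed by a
 * field of CPUMBState at a fixed offset, so generated code and the C
 * helpers always observe the same architectural state.
 */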
void mb_tcg_init(void)
{
    int i;

    env_debug = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}