/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "helper.h"
#include "microblaze-decode.h"
#include "qemu-common.h"

#if DISAS_MB && !SIM_COMPAT
# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DIS(...) do { } while (0)
#endif

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
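
/* For example, decode() below uses EXTRACT_FIELD(ir, 21, 25) to pull the
   5-bit rd register field out of a fetched instruction word. */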

static TCGv env_debug;
static TCGv_ptr cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;

#include "gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    target_ulong cache_pc;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;

#define JMP_INDIRECT 2

    int abort_at_next_insn;

    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

/* Sign extend at translation time.  */
static inline int sign_extend(unsigned int val, unsigned int width)

static inline void t_sync_flags(DisasContext *dc)
{
    /* Sync the tb-dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}
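
/* Record the current PC in SR_PC and call the raise_exception helper with
   the given exception index; translation of this block then ends with
   DISAS_UPDATE. */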
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
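
/* If the destination lies on the same guest page as this TB, chain to it
   directly; otherwise just update SR_PC and exit to the main loop so the
   next TB is looked up dynamically. */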
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;

    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((long)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
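
/* Return a pointer to the TCGv holding operand B of an ALU op: for type-B
   instructions this is env_imm (ORed with a preceding imm prefix when
   IMM_FLAG is set, otherwise loaded with the sign-extended 16-bit
   immediate); for type-A instructions it is register rb. */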
static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    }

    return &cpu_R[dc->rb];
}

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    if (k && !c && dc->rd)
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    else if (dc->rd)
        gen_helper_addkc(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)),
                         tcg_const_tl(k), tcg_const_tl(c));
    else {
        TCGv d = tcg_temp_new();
        gen_helper_addkc(d, cpu_R[dc->ra], *(dec_alu_op_b(dc)),
                         tcg_const_tl(k), tcg_const_tl(c));
        tcg_temp_free(d);
    }
}

static void dec_sub(DisasContext *dc)
    unsigned int u, cmp, k, c;

    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);

    gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);

    gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    gen_helper_subkc(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)),
                     tcg_const_tl(k), tcg_const_tl(c));

    gen_helper_subkc(t, cpu_R[dc->ra], *(dec_alu_op_b(dc)),
                     tcg_const_tl(k), tcg_const_tl(c));

    tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

static void dec_pattern(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
        && !(dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
        && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;

    LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
    gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);

    LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);

    TCGv t0 = tcg_temp_local_new();
    l1 = gen_new_label();
    tcg_gen_movi_tl(t0, 1);
    tcg_gen_brcond_tl(TCG_COND_EQ,
                      cpu_R[dc->ra], cpu_R[dc->rb], l1);
    tcg_gen_movi_tl(t0, 0);

    tcg_gen_mov_tl(cpu_R[dc->rd], t0);

    LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
    l1 = gen_new_label();

    TCGv t0 = tcg_temp_local_new();
    tcg_gen_movi_tl(t0, 1);
    tcg_gen_brcond_tl(TCG_COND_NE,
                      cpu_R[dc->ra], cpu_R[dc->rb], l1);
    tcg_gen_movi_tl(t0, 0);

    tcg_gen_mov_tl(cpu_R[dc->rd], t0);

    cpu_abort(dc->env,
              "unsupported pattern insn opcode=%x\n", dc->opcode);

static void dec_and(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (not) {
        TCGv t = tcg_temp_new();
        tcg_gen_not_tl(t, *(dec_alu_op_b(dc)));
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], t);
        tcg_temp_free(t);
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}
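
/* Replicate bit 0 of v across a full word and use it to update both the
   carry (MSR_C) and carry-copy (MSR_CC) bits of MSR. */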
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_mov_tl(env_debug, t0);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    dc->cpustate_changed = 1;
    tcg_gen_mov_tl(cpu_SR[SR_MSR], v);
    /* The PVR bit is always set: we have a processor version register.  */
    tcg_gen_ori_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], (1 << 10));
}

static void dec_msr(DisasContext *dc)
{
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(dc->env);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);

    dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->env->pvr.regs[2] & PVR2_USE_MSR_INSTR)) {

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);

        msr_read(dc, cpu_R[dc->rd]);

        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        tcg_gen_not_tl(t1, t1);
        tcg_gen_and_tl(t0, t0, t1);

        tcg_gen_or_tl(t0, t0, t1);

        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;

    if ((dc->tb_flags & MSR_EE_FLAG)
        && mem_index == MMU_USER_IDX) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {

        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);

        gen_helper_mmu_write(tcg_const_tl(sr), cpu_R[dc->ra]);

        gen_helper_mmu_read(cpu_R[dc->rd], tcg_const_tl(sr));

    LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);

        msr_write(dc, cpu_R[dc->ra]);

        tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);

        tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);

        /* Ignored at the moment.  */

        cpu_abort(dc->env, "unknown mts reg %x\n", sr);

    LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

        msr_read(dc, cpu_R[dc->rd]);

        tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);

        tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);

        tcg_gen_movi_tl(cpu_R[dc->rd], 0);

        tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);

        tcg_gen_ld_tl(cpu_R[dc->rd],
                      cpu_env, offsetof(CPUState, pvr.regs[rn]));

        cpu_abort(dc->env, "unknown mfs reg %x\n", sr);

    tcg_gen_movi_tl(cpu_R[0], 0);
}

/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* 64-bit unsigned mul, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
        && !(dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
        && !(dc->env->pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
    t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->env->pvr.regs[2] & PVR2_USE_MUL64_MASK))) {

    LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
    t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);

    LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
    t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);

    LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
    t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);

    LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
    t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);

    cpu_abort(dc->env, "unknown MUL insn %x\n", subcode);

static void dec_div(DisasContext *dc)
{
    unsigned int u;

    if (!(dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
        && !((dc->env->pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

    tcg_gen_movi_tl(cpu_R[dc->rd], 0);

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
        && !(dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
        && !(dc->env->pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    TCGv t0, t1;
    unsigned int op;
    int mem_index = cpu_mmu_index(dc->env);

    op = dc->ir & ((1 << 8) - 1);

    LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
    tcg_gen_andi_tl(t0, cpu_R[dc->ra], 1);

    tcg_gen_shli_tl(t1, t1, 31);

    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
    tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t1);

    LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

    tcg_gen_andi_tl(t0, cpu_R[dc->ra], 1);

    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);

    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);

    LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
    tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);

    LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
    tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);

    LOG_DIS("wdc r%d\n", dc->ra);
    if ((dc->tb_flags & MSR_EE_FLAG)
        && mem_index == MMU_USER_IDX) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    LOG_DIS("wic r%d\n", dc->ra);
    if ((dc->tb_flags & MSR_EE_FLAG)
        && mem_index == MMU_USER_IDX) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    cpu_abort(dc->env, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
              dc->pc, op, dc->rd, dc->ra, dc->rb);
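
/* If a direct branch is pending, convert it to an indirect one and
   materialize its state in env_btaken/env_btarget, so that a fault taken
   in the delay slot still sees a consistent branch state. */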
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT) {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}
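
/* The imm insn supplies the upper 16 bits of a 32-bit immediate for the
   following instruction; IMM_FLAG tells dec_alu_op_b() to OR the next
   insn's 16-bit immediate into env_imm instead of sign-extending it. */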
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
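
/* Emit a zero-extending load of 'size' bytes from 'addr' into 'dst',
   using the mmu index of the current execution mode. */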
static inline void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
                            unsigned int size)
{
    int mem_index = cpu_mmu_index(dc->env);

    if (size == 1) {
        tcg_gen_qemu_ld8u(dst, addr, mem_index);
    } else if (size == 2) {
        tcg_gen_qemu_ld16u(dst, addr, mem_index);
    } else if (size == 4) {
        tcg_gen_qemu_ld32u(dst, addr, mem_index);
    } else
        cpu_abort(dc->env, "Incorrect load size %d\n", size);
}
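
/* Compute the effective address of a load/store into *t and return a
   pointer to it; for the simple register-direct case the function instead
   returns a pointer straight to the register TCGv. */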
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;

    /* Treat the fast cases first.  */

    tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

    return &cpu_R[dc->ra];

    tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
    tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);

    tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));

static void dec_load(DisasContext *dc)
{
    TCGv t, *addr;
    unsigned int size;

    size = 1 << (dc->opcode & 3);

    LOG_DIS("l %x %d\n", dc->opcode, size);

    addr = compute_ldst_addr(dc, &t);

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        gen_helper_memalign(*addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size));
    }

    gen_load(dc, cpu_R[dc->rd], *addr, size);

    gen_load(dc, env_imm, *addr, size);

static void gen_store(DisasContext *dc, TCGv addr, TCGv val,
                      unsigned int size)
{
    int mem_index = cpu_mmu_index(dc->env);

    if (size == 1)
        tcg_gen_qemu_st8(val, addr, mem_index);
    else if (size == 2) {
        tcg_gen_qemu_st16(val, addr, mem_index);
    } else if (size == 4) {
        tcg_gen_qemu_st32(val, addr, mem_index);
    } else
        cpu_abort(dc->env, "Incorrect store size %d\n", size);
}

static void dec_store(DisasContext *dc)
{
    TCGv t, *addr;
    unsigned int size;

    size = 1 << (dc->opcode & 3);

    LOG_DIS("s%d%s\n", size, dc->type_b ? "i" : "");

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    /* Verify alignment if needed.  */
    if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        gen_helper_memalign(*addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size));
    }

    gen_store(dc, *addr, cpu_R[dc->rd], size);
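
/* Evaluate a branch condition code: set env_btaken to 1 when the
   comparison of a against b holds and to 0 otherwise. */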

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    int l1;

    l1 = gen_new_label();
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_brcond_tl(TCG_COND_EQ, a, b, l1);
    tcg_gen_movi_tl(env_btaken, 0);

    l1 = gen_new_label();
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_brcond_tl(TCG_COND_NE, a, b, l1);
    tcg_gen_movi_tl(env_btaken, 0);

    l1 = gen_new_label();
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_brcond_tl(TCG_COND_LT, a, b, l1);
    tcg_gen_movi_tl(env_btaken, 0);

    l1 = gen_new_label();
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_brcond_tl(TCG_COND_LE, a, b, l1);
    tcg_gen_movi_tl(env_btaken, 0);

    l1 = gen_new_label();
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_brcond_tl(TCG_COND_GE, a, b, l1);
    tcg_gen_movi_tl(env_btaken, 0);

    l1 = gen_new_label();
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_brcond_tl(TCG_COND_GT, a, b, l1);
    tcg_gen_movi_tl(env_btaken, 0);

    cpu_abort(dc->env, "Unknown condition code %x.\n", cc);

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    int l1;

    l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUState, bimm));
    }

    tcg_gen_movi_tl(env_btarget, dc->pc);
    tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
    dc->jmp = JMP_INDIRECT;
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs;

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);
    LOG_DIS("br%s%s%s%s imm=%x\n",
            abs ? "a" : "", link ? "l" : "",
            dc->type_b ? "i" : "", dslot ? "d" : "",
            dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;

    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
    if (link && !(dc->tb_flags & IMM_FLAG)
        && (dc->imm == 8 || dc->imm == 0x18))
        t_gen_raise_exception(dc, EXCP_BREAK);

    t_gen_raise_exception(dc, EXCP_DEBUG);

    if (dc->tb_flags & IMM_FLAG) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    } else {
        dc->jmp = JMP_DIRECT;
        dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
    }
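
/* The rtid/rtbd/rted return paths below rebuild MSR: the saved UMS/VMS
   bits sit one position above UM/VM, so MSR is shifted right by one and
   masked with MSR_VM | MSR_UM to recover the previous mode, which is then
   merged back in (do_rti additionally re-enables IE, do_rtb clears BIP,
   do_rte sets EE and clears EIP). */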
static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(dc->env);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_fpu(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
        && !(dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
        && !((dc->env->pvr.regs[2] & PVR2_USE_FPU_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    qemu_log("unimplemented FPU insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

static void dec_null(DisasContext *dc)
{
    qemu_log("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

static struct decoder_info {
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_BARREL, dec_barrel},
    {DEC_ST, dec_store},

static inline void decode(DisasContext *dc)
{
    uint32_t ir;
    int i;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
        tcg_gen_debug_insn_start(dc->pc);

    dc->ir = ir = ldl_code(dc->pc);
    LOG_DIS("%8.8x\t", dc->ir);

    if ((dc->tb_flags & MSR_EE_FLAG)
        && !(dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
        && !(dc->env->pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    LOG_DIS("nr_nops=%d\t", dc->nr_nops);

    if (dc->nr_nops > 4)
        cpu_abort(dc->env, "fetching nop sequence\n");

    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

static void check_breakpoint(CPUState *env, DisasContext *dc)
{
    CPUBreakpoint *bp;

    if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
        TAILQ_FOREACH(bp, &env->breakpoints, entry) {
            if (bp->pc == dc->pc) {
                t_gen_raise_exception(dc, EXCP_DEBUG);
                dc->is_jmp = DISAS_UPDATE;
            }
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
static void
gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
                               int search_pc)
{
    uint16_t *gen_opc_end;

    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;

    qemu_log_try_set_file(stderr);

    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;

    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);

    dc->singlestep_enabled = env->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;

    if (pc_start & 3)
        cpu_abort(env, "Microblaze: unaligned PC=%x\n", pc_start);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("--------------\n");
        log_cpu_state(env, 0);
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    if (tb->cflags & CF_COUNT_MASK)
        max_insns = tb->cflags & CF_COUNT_MASK;
    else
        max_insns = CF_COUNT_MASK;

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    }

        check_breakpoint(env, dc);

        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;

                gen_opc_instr_start[lj++] = 0;

            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

            dc->tb_flags &= ~IMM_FLAG;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp != JMP_DIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                }
            }
        }
        if (env->singlestep_enabled)
            break;
    } while (!dc->is_jmp && !dc->cpustate_changed
             && gen_opc_ptr < gen_opc_end
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    if (dc->jmp == JMP_DIRECT) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();

    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }

    if (unlikely(env->singlestep_enabled)) {
        t_gen_raise_exception(dc, EXCP_DEBUG);
        if (dc->is_jmp == DISAS_NEXT)
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, npc);
            break;

            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(0);
            break;

            /* nothing more to generate */
            break;
        }
    }
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;

            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {

        log_target_disas(pc_start, dc->pc - pc_start, 0);

        qemu_log("\nisize=%d osize=%zd\n",
                 dc->pc - pc_start, gen_opc_ptr - gen_opc_buf);
    }

    assert(!dc->abort_at_next_insn);
}

void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x debug[%x] imm=%x iflags=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR],
                env->debug, env->imm, env->iflags);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s)\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel");
    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}

CPUState *cpu_mb_init(const char *cpu_model)
{
    CPUState *env;
    static int tcg_initialized = 0;
    int i;

    env = qemu_mallocz(sizeof(CPUState));

    env->pvr.regs[0] = PVR0_PVR_FULL_MASK \
                       | PVR0_USE_BARREL_MASK \
                       | PVR0_USE_DIV_MASK \
                       | PVR0_USE_HW_MUL_MASK \
                       | PVR0_USE_EXC_MASK \
                       | PVR0_USE_ICACHE_MASK \
                       | PVR0_USE_DCACHE_MASK \

    env->pvr.regs[2] = PVR2_D_OPB_MASK \

                       | PVR2_USE_MSR_INSTR \
                       | PVR2_USE_PCMP_INSTR \
                       | PVR2_USE_BARREL_MASK \
                       | PVR2_USE_DIV_MASK \
                       | PVR2_USE_HW_MUL_MASK \
                       | PVR2_USE_MUL64_MASK \

    env->pvr.regs[10] = 0x0c000000; /* Default to spartan 3a dsp family.  */
    env->pvr.regs[11] = PVR11_USE_MMU | (16 << 17);
#if !defined(CONFIG_USER_ONLY)

    env->mmu.c_mmu_tlb_access = 3;
    env->mmu.c_mmu_zones = 16;
#endif

    if (tcg_initialized)
        return env;

    tcg_initialized = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(TCG_AREG0,
                                   offsetof(CPUState, debug),
                                   "debug");
    env_iflags = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUState, iflags),
                                    "iflags");
    env_imm = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUState, imm),
                                 "imm");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUState, btarget),
                                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUState, btaken),
                                    "btaken");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUState, regs[i]),
                                      regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
                                       offsetof(CPUState, sregs[i]),
                                       special_regnames[i]);
    }
#define GEN_HELPER 2

void cpu_reset(CPUState *env)
{
    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    memset(env, 0, offsetof(CPUMBState, breakpoints));

    env->sregs[SR_MSR] = 0;
#if defined(CONFIG_USER_ONLY)
    /* start in user mode with interrupts enabled.  */
    env->pvr.regs[10] = 0x0c000000; /* Spartan 3a dsp.  */
#else
    mmu_init(&env->mmu);
#endif
}

void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->sregs[SR_PC] = gen_opc_pc[pc_pos];
}