/*
 * Xilinx MicroBlaze emulation for QEMU: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"

#include "trace-tcg.h"

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
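
/*
 * For illustration: EXTRACT_FIELD(ir, 16, 20) expands to
 * ((ir >> 16) & ((1 << 5) - 1)), i.e. the five-bit register field
 * held in instruction bits [20:16].
 */
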
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */

static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;
static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_btaken;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;
static TCGv_i32 cpu_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    DisasContextBase base;

    MicroBlazeCPU *cpu;
    TCGv_i32 r0;
    bool r0_set;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint32_t ext_imm;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int mem_index;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
} DisasContext;

static int typeb_imm(DisasContext *dc, int x)
{
    if (dc->tb_flags & IMM_FLAG) {
        return deposit32(dc->ext_imm, 0, 16, x);
    }
    return x;
}
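
/*
 * Worked example: an "imm 0x1234" prefix records ext_imm = 0x12340000
 * and sets IMM_FLAG; a following "addik rd, ra, 0x5678" then sees
 * deposit32(0x12340000, 0, 16, 0x5678) == 0x12345678 as its operand.
 */
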
/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

static inline void t_sync_flags(DisasContext *dc)
{
    /* Sync the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_i32(cpu_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
    }
}

static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}

static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_const_i32(esr_ec);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
    tcg_temp_free_i32(tmp);

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (dc->base.singlestep_enabled) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
        tcg_gen_movi_i32(cpu_pc, dest);
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
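
/*
 * Note: direct TB chaining (tcg_gen_goto_tb + tcg_gen_exit_tb(tb, n))
 * is only safe while the destination lies in the same guest page as
 * this TB (see use_goto_tb above); otherwise we exit with
 * tcg_gen_exit_tb(NULL, 0) and let the main loop look up the target.
 */
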
/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cpu->cfg.illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}

static int32_t dec_alu_typeb_imm(DisasContext *dc)
{
    tcg_debug_assert(dc->type_b);
    return typeb_imm(dc, (int16_t)dc->imm);
}

static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        tcg_gen_movi_i32(cpu_imm, dec_alu_typeb_imm(dc));
        return &cpu_imm;
    }
    return &cpu_R[dc->rb];
}

static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}

static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}

static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}

static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}

static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}

static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_const_i32(arg->imm);
    fn(rd, ra, imm);
    tcg_temp_free_i32(imm);
    return true;
}

#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cpu->cfg.CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cpu->cfg.CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cpu->cfg.CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }

/* No input carry, but output carry.  */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);

    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);

    tcg_temp_free_i32(zero);
}
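
/*
 * E.g. 0xffffffff + 0x00000001: the double-word add2 yields
 * out = 0x00000000 with the high word, and thus cpu_msr_c, equal to 1.
 */
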
/* Input and output carry.  */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(zero);
}
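
/*
 * Carry handling: the first add2 computes (c1:tmp) = ina + old C;
 * the second computes (C:out) = (c1:tmp) + inb, so the final MSR[C]
 * is the carry-out of ina + inb + old C (which can never exceed 1).
 */
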
/* Input carry, but no output carry.  */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)

static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)

static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}
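
/* E.g. imm_s = 4, imm_w = 8 extracts ina[11:4] into out[7:0]. */
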
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}
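
/*
 * Note that the deposit reuses "out" as the background value, so the
 * bits of rd outside [imm_w:imm_s] are preserved, matching the
 * insert-field semantics of bsifi.
 */
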
DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)

static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)

static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)
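
/*
 * Worked example for cmp: with ra = 5 and rb = 3, rd = rb - ra =
 * 0xfffffffe, and bit 31 is forced to (rb <s ra) = 1, so a branch
 * testing rd's sign sees "rb less than ra".
 */
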
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)

/* Does not use ENV_WRAPPER3, because arguments are swapped as well.  */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, cpu_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, cpu_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)

static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
    return true;
}
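
/*
 * The prefix lives for exactly one following insn: mb_tr_translate_insn
 * sets clear_imm = 1 before each decode and trans_imm resets it, so
 * IMM_FLAG is dropped after the consuming insn. ext_imm also travels
 * in tb->cs_base, so the pairing survives a TB boundary between the
 * imm prefix and its consumer.
 */
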
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)

/* No input carry, but output carry.  */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}
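
/*
 * MicroBlaze subtract sets MSR[C] to the carry of inb + ~ina + 1,
 * i.e. C = 1 when no borrow occurs; the GEU setcond computes exactly
 * (inb >=u ina). E.g. rsub with ina = 1, inb = 5 gives out = 4, C = 1.
 */
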
/* Input and output carry.  */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(tmp);
}

/* No input or output carry.  */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, but no output carry.  */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);

    tcg_temp_free_i32(nota);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);

    tcg_temp_free_i32(tmp);
}
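
/*
 * extract2 forms the shift-through-carry in one op: out is the low
 * 32 bits of (old_C:ina) >> 1, i.e. (ina >> 1) | (old_C << 31),
 * while the new MSR[C] is ina & 1.
 */
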
static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode.  */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)

static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If either register is r0, the address is just the other one (or 0). */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    if ((ra == 1 || rb == 1) && dc->cpu->cfg.stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If ra is r0, the address is just the immediate.  */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cpu->cfg.stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cpu->cfg.addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}

static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    TCGv_i32 v;
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    sync_jmpstate(dc);

    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses.  That's why we speculatively do the load
     * into v.  If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);

    /* TODO: Convert to CPUClass::do_unaligned_access.  */
    if (dc->cpu->cfg.unaligned_exceptions && size > MO_8) {
        TCGv_i32 t0 = tcg_const_i32(0);
        TCGv_i32 treg = tcg_const_i32(rd);
        TCGv_i32 tsize = tcg_const_i32((1 << size) - 1);

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        gen_helper_memalign(cpu_env, addr, treg, t0, tsize);

        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(treg);
        tcg_temp_free_i32(tsize);
    }

    if (rd) {
        tcg_gen_mov_i32(cpu_R[rd], v);
    }
    tcg_temp_free_i32(v);

    tcg_temp_free(addr);
    return true;
}
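
/*
 * Reversed-access example: a byte access flips the low address bits
 * with (addr ^ 3), a halfword access with (addr ^ 2) plus an
 * MO_BSWAP toggle, and a full-word reverse needs no address tweak at
 * all -- only the MO_BSWAP flip of the data lanes.
 */
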
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    sync_jmpstate(dc);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);
    tcg_temp_free(addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}

static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way out of the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    sync_jmpstate(dc);

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);

    /* TODO: Convert to CPUClass::do_unaligned_access.  */
    if (dc->cpu->cfg.unaligned_exceptions && size > MO_8) {
        TCGv_i32 t1 = tcg_const_i32(1);
        TCGv_i32 treg = tcg_const_i32(rd);
        TCGv_i32 tsize = tcg_const_i32((1 << size) - 1);

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        /*
         * FIXME: if the alignment is wrong, we should restore the value
         * in memory.  One possible way to achieve this is to probe
         * the MMU prior to the memaccess, that way we could put
         * the alignment checks in between the probe and the mem
         * access.
         */
        gen_helper_memalign(cpu_env, addr, treg, t1, tsize);

        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(treg);
        tcg_temp_free_i32(tsize);
    }

    tcg_temp_free(addr);
    return true;
}

static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    sync_jmpstate(dc);

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
    tcg_temp_free(addr);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);
    tcg_temp_free_i32(tval);

    /* Success: clear MSR[C].  */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure: set MSR[C].  */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
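
/*
 * The lwx/swx pair is mapped onto a compare-and-swap: swx succeeds
 * only if (a) the address matches the reservation made by lwx and
 * (b) the atomic cmpxchg finds the value lwx loaded still in memory.
 * This is weaker than a true exclusive monitor (an ABA change goes
 * undetected) but is the standard TCG idiom for ll/sc.
 */
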
static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap.  */
    if (dc->cpu->cfg.opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }
    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}

static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
    tcg_temp_free_i32(t);
}
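
/*
 * cpu_msr_c holds the carry as a 0/1 boolean, so multiplying it by
 * (MSR_C | MSR_CC) replicates it into both the architectural carry
 * bit and the carry copy in a single op before oring into the
 * assembled MSR value.
 */
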
static void msr_write(DisasContext *dc, TCGv_i32 v)
{
    dc->cpustate_changed = 1;

    /* Install MSR_C.  */
    tcg_gen_extract_i32(cpu_msr_c, v, 2, 1);

    /* Clear MSR_C and MSR_CC; MSR_PVR is not writable, and is always clear. */
    tcg_gen_andi_i32(cpu_msr, v, ~(MSR_C | MSR_CC | MSR_PVR));
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0, t1;
    unsigned int sr, rn;
    bool to, clrset, extended = false;

    sr = extract32(dc->imm, 0, 14);
    to = extract32(dc->imm, 14, 1);
    clrset = extract32(dc->imm, 15, 1) == 0;
    if (to) {
        dc->cpustate_changed = 1;
    }

    /* Extended MSRs are only available if addr_size > 32.  */
    if (dc->cpu->cfg.addr_size > 32) {
        /* The E-bit is encoded differently for To/From MSR.  */
        static const unsigned int e_bit[] = { 19, 24 };

        extended = extract32(dc->imm, e_bit[to], 1);
    }

    if (clrset) {
        /* msrclr and msrset.  */
        bool clr = extract32(dc->ir, 16, 1);

        if (!dc->cpu->cfg.use_msr_instr) {
            dec_null(dc);
            return;
        }

        if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
            return;
        }

        if (dc->rd) {
            msr_read(dc, cpu_R[dc->rd]);
        }

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        msr_read(dc, t0);
        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_i32(t1, t1);
            tcg_gen_and_i32(t0, t0, t1);
        } else {
            tcg_gen_or_i32(t0, t0, t1);
        }
        msr_write(dc, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);
        dc->base.is_jmp = DISAS_UPDATE;
        return;
    }

    if (trap_userspace(dc, to)) {
        return;
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        TCGv_i32 tmp_ext = tcg_const_i32(extended);
        TCGv_i32 tmp_sr;

        sr &= 7;
        tmp_sr = tcg_const_i32(sr);
        if (to) {
            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
        } else {
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
        }
        tcg_temp_free_i32(tmp_sr);
        tcg_temp_free_i32(tmp_ext);
        return;
    }
#endif

    if (to) {
        /* mts.  */
        switch (sr) {
        case SR_MSR:
            msr_write(dc, cpu_R[dc->ra]);
            break;
        case SR_EAR:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_extu_i32_i64(t64, cpu_R[dc->ra]);
            tcg_gen_st_i64(t64, cpu_env, offsetof(CPUMBState, ear));
            tcg_temp_free_i64(t64);
        }
            break;
        case SR_ESR:
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, esr));
            break;
        case SR_FSR:
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, fsr));
            break;
        case SR_BTR:
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, btr));
            break;
        case SR_EDR:
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, edr));
            break;
        case 0x800:
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, slr));
            break;
        case 0x802:
            tcg_gen_st_i32(cpu_R[dc->ra],
                           cpu_env, offsetof(CPUMBState, shr));
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
            break;
        }
    } else {
        /* mfs.  */
        switch (sr) {
        case SR_PC:
            tcg_gen_movi_i32(cpu_R[dc->rd], dc->base.pc_next);
            break;
        case SR_MSR:
            msr_read(dc, cpu_R[dc->rd]);
            break;
        case SR_EAR:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
            if (extended) {
                tcg_gen_extrh_i64_i32(cpu_R[dc->rd], t64);
            } else {
                tcg_gen_extrl_i64_i32(cpu_R[dc->rd], t64);
            }
            tcg_temp_free_i64(t64);
        }
            break;
        case SR_ESR:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, esr));
            break;
        case SR_FSR:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, fsr));
            break;
        case SR_BTR:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, btr));
            break;
        case SR_EDR:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, edr));
            break;
        case 0x800:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, slr));
            break;
        case 0x802:
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, shr));
            break;
        case 0x2000 ... 0x200c:
            rn = sr & 0xf;
            tcg_gen_ld_i32(cpu_R[dc->rd],
                           cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
            break;
        default:
            cpu_abort(cs, "unknown mfs reg %x\n", sr);
            break;
        }
    }

    /* Keep r0 zeroed if it was used as a destination.  */
    if (dc->rd == 0) {
        tcg_gen_movi_i32(cpu_R[0], 0);
    }
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv_i32 d, TCGv_i32 a)
{
    static const int mb_to_tcg_cc[] = {
        [CC_EQ] = TCG_COND_EQ,
        [CC_NE] = TCG_COND_NE,
        [CC_LT] = TCG_COND_LT,
        [CC_LE] = TCG_COND_LE,
        [CC_GE] = TCG_COND_GE,
        [CC_GT] = TCG_COND_GT,
    };

    switch (cc) {
    case CC_EQ:
    case CC_NE:
    case CC_LT:
    case CC_LE:
    case CC_GE:
    case CC_GT:
        tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
        break;
    default:
        cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
        break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
{
    TCGv_i32 zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, cpu_pc,
                        cpu_btaken, zero,
                        pc_true, pc_false);

    tcg_temp_free_i32(zero);
}

static void dec_setup_dslot(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG));

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;

    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, bimm));
    tcg_temp_free_i32(tmp);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);

    dc->delayed_branch = 1;
    if (dslot) {
        dec_setup_dslot(dc);
    }

    if (dc->type_b) {
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->base.pc_next + dec_alu_typeb_imm(dc);
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dc->rb], dc->base.pc_next);
    }
    eval_cc(dc, cc, cpu_btaken, cpu_R[dc->ra]);
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        uint16_t mbar_imm = dc->rd;

        /* Data access memory barrier.  */
        if ((mbar_imm & 2) == 0) {
            tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
        }

        /* mbar IMM & 16 decodes to sleep.  */
        if (mbar_imm & 16) {
            TCGv_i32 tmp_1;

            if (trap_userspace(dc, true)) {
                /* Sleep is a privileged instruction.  */
                return;
            }

            t_sync_flags(dc);

            tmp_1 = tcg_const_i32(1);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_temp_free_i32(tmp_1);

            tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

            gen_raise_exception(dc, EXCP_HLT);
            return;
        }
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    if (abs && link && !dslot) {
        if (dc->type_b) {
            /* BRKI: userspace may only target vectors 0x8 and 0x18.  */
            uint32_t imm = dec_alu_typeb_imm(dc);
            if (trap_userspace(dc, imm != 8 && imm != 0x18)) {
                return;
            }
        } else {
            /* BRK */
            if (trap_userspace(dc, true)) {
                return;
            }
        }
    }

    dc->delayed_branch = 1;
    if (dslot) {
        dec_setup_dslot(dc);
    }
    if (link && dc->rd) {
        tcg_gen_movi_i32(cpu_R[dc->rd], dc->base.pc_next);
    }

    if (abs) {
        if (dc->type_b) {
            uint32_t dest = dec_alu_typeb_imm(dc);

            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dest;
            tcg_gen_movi_i32(cpu_btarget, dest);
            if (link && !dslot) {
                switch (dest) {
                case 8:
                    gen_raise_exception_sync(dc, EXCP_BREAK);
                    break;
                case 0x18:
                    gen_raise_exception_sync(dc, EXCP_DEBUG);
                    break;
                }
            }
        } else {
            dc->jmp = JMP_INDIRECT;
            tcg_gen_mov_i32(cpu_btarget, cpu_R[dc->rb]);
            if (link && !dslot) {
                gen_raise_exception_sync(dc, EXCP_BREAK);
            }
        }
    } else if (dc->type_b) {
        dc->jmp = JMP_DIRECT;
        dc->jmp_pc = dc->base.pc_next + dec_alu_typeb_imm(dc);
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_pc);
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dc->rb], dc->base.pc_next);
    }
    tcg_gen_movi_i32(cpu_btaken, 1);
}

static inline void do_rti(DisasContext *dc)
{
    TCGv_i32 t0, t1;

    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_mov_i32(t1, cpu_msr);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_ori_i32(t1, t1, MSR_IE);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}
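
/*
 * In the MSR layout the saved copies UMS/VMS sit one bit above
 * UM/VM, so a single shift right by one moves the saved user-mode
 * and virtual-mode bits back into their active positions; rti
 * additionally re-enables interrupts via MSR_IE. do_rtb and do_rte
 * below follow the same pattern for the break and exception bits.
 */
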
static inline void do_rtb(DisasContext *dc)
{
    TCGv_i32 t0, t1;

    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_mov_i32(t1, cpu_msr);
    tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv_i32 t0, t1;

    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_mov_i32(t1, cpu_msr);
    tcg_gen_ori_i32(t1, t1, MSR_EE);
    tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
        return;
    }

    dec_setup_dslot(dc);

    if (i_bit) {
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        dc->tb_flags |= DRTE_FLAG;
    }

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_i32(cpu_btaken, 1);
    tcg_gen_add_i32(cpu_btarget, cpu_R[dc->ra], *dec_alu_op_b(dc));
}

static void dec_null(DisasContext *dc)
{
    if (trap_illegal(dc, true)) {
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  (uint32_t)dc->base.pc_next, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    if (trap_userspace(dc, true)) {
        return;
    }

    t_id = tcg_temp_new_i32();
    if (dc->type_b) {
        tcg_gen_movi_i32(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_i32(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_BCC, dec_bcc},
    {DEC_BR, dec_br},
    {DEC_RTS, dec_rts},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

static void old_decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;

    /* Insn bit 2 (i.e. host bit 29) selects type A vs type B.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Dispatch to the decoder matching this opcode group.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
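
/*
 * Transitional decoder: mb_tr_translate_insn first tries the
 * decodetree-generated decode(); only insns not yet converted
 * (branches, msr accesses, stream ops) fall back to this
 * table-driven path.
 */
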
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cpu = cpu;
    dc->synced_flags = dc->tb_flags = dc->base.tb->flags;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    dc->jmp = dc->delayed_branch ? JMP_INDIRECT : JMP_NOJMP;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(&cpu->env, false);

    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
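
/*
 * The bound caps the TB at the end of the current guest page:
 * -(pc | TARGET_PAGE_MASK) is the number of bytes to the page
 * boundary (e.g. 4 when pc sits at offset 0xffc of a 4 KiB page),
 * and dividing by the 4-byte insn size converts that to an insn count.
 */
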
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}

static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    tcg_gen_insn_start(dcb->pc_next);
}

static bool mb_tr_breakpoint_check(DisasContextBase *dcb, CPUState *cs,
                                   const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    gen_raise_exception_sync(dc, EXCP_DEBUG);

    /*
     * The address covered by the breakpoint must be included in
     * [tb->pc, tb->pc + tb->size) in order for it to be
     * properly cleared -- thus we increment the PC here so that
     * the logic setting tb->size below does the right thing.
     */
    dc->base.pc_next += 4;
    return true;
}

static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUMBState *env = cs->env_ptr;
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu.  */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->clear_imm = 1;
    ir = cpu_ldl_code(env, dc->base.pc_next);
    if (!decode(dc, ir)) {
        old_decode(dc, ir);
    }

    if (dc->r0) {
        tcg_temp_free_i32(dc->r0);
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    if (dc->clear_imm && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags &= ~IMM_FLAG;
        tcg_gen_discard_i32(cpu_imm);
    }
    dc->base.pc_next += 4;

    if (dc->delayed_branch && --dc->delayed_branch == 0) {
        if (dc->tb_flags & DRTI_FLAG) {
            do_rti(dc);
        }
        if (dc->tb_flags & DRTB_FLAG) {
            do_rtb(dc);
        }
        if (dc->tb_flags & DRTE_FLAG) {
            do_rte(dc);
        }
        /* Clear the delay slot flag.  */
        dc->tb_flags &= ~D_FLAG;
        dc->base.is_jmp = DISAS_JUMP;
    }

    /* Force an exit if the per-tb cpu state has changed.  */
    if (dc->base.is_jmp == DISAS_NEXT && dc->cpustate_changed) {
        dc->base.is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    }
}

static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    assert(!dc->abort_at_next_insn);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB.  */
        return;
    }

    t_sync_flags(dc);
    if (dc->tb_flags & D_FLAG) {
        sync_jmpstate(dc);
        dc->jmp = JMP_NOJMP;
    }

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        assert(dc->jmp == JMP_NOJMP);
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_UPDATE:
        assert(dc->jmp == JMP_NOJMP);
        if (unlikely(cs->singlestep_enabled)) {
            gen_raise_exception(dc, EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        return;

    case DISAS_JUMP:
        switch (dc->jmp) {
        case JMP_INDIRECT:
        {
            TCGv_i32 tmp_pc = tcg_const_i32(dc->base.pc_next);
            eval_cond_jmp(dc, cpu_btarget, tmp_pc);
            tcg_temp_free_i32(tmp_pc);

            if (unlikely(cs->singlestep_enabled)) {
                gen_raise_exception(dc, EXCP_DEBUG);
            } else {
                tcg_gen_exit_tb(NULL, 0);
            }
        }
            return;

        case JMP_DIRECT_CC:
        {
            TCGLabel *l1 = gen_new_label();
            tcg_gen_brcondi_i32(TCG_COND_NE, cpu_btaken, 0, l1);
            gen_goto_tb(dc, 1, dc->base.pc_next);
            gen_set_label(l1);
        }
            /* fall through */

        case JMP_DIRECT:
            gen_goto_tb(dc, 0, dc->jmp_pc);
            return;
        }
        /* fall through */

    default:
        g_assert_not_reached();
    }
}

static void mb_tr_disas_log(const DisasContextBase *dcb, CPUState *cs)
{
    qemu_log("IN: %s\n", lookup_symbol(dcb->pc_first));
    log_target_disas(cs, dcb->pc_first, dcb->tb->size);
}

static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start = mb_tr_tb_start,
    .insn_start = mb_tr_insn_start,
    .breakpoint_check = mb_tr_breakpoint_check,
    .translate_insn = mb_tr_translate_insn,
    .tb_stop = mb_tr_tb_stop,
    .disas_log = mb_tr_disas_log,
};

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env) {
        return;
    }

    qemu_fprintf(f, "IN: PC=%x %s\n",
                 env->pc, lookup_symbol(env->pc));
    qemu_fprintf(f, "rmsr=%x resr=%x rear=%" PRIx64 " "
                 "imm=%x iflags=%x fsr=%x rbtr=%x\n",
                 env->msr, env->esr, env->ear,
                 env->imm, env->iflags, env->fsr, env->btr);
    qemu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->btaken, env->btarget,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));
    for (i = 0; i < 12; i++) {
        qemu_fprintf(f, "rpvr%2.2d=%8.8x ", i, env->pvr.regs[i]);
        if ((i + 1) % 4 == 0) {
            qemu_fprintf(f, "\n");
        }
    }

    /* Registers that aren't modeled are reported as 0.  */
    qemu_fprintf(f, "redr=%x rpid=0 rzpr=0 rtlbx=0 rtlbsx=0 "
                 "rtlblo=0 rtlbhi=0\n", env->edr);
    qemu_fprintf(f, "slr=%x shr=%x\n", env->slr, env->shr);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            qemu_fprintf(f, "\n");
        }
    }
    qemu_fprintf(f, "\n\n");
}

void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(btaken),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
            tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
    }

    cpu_res_addr =
        tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}