/*
 * Moxie emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009, 2013 Anthony Green
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library. If not, see <http://www.gnu.org/licenses/>.
 */

/* For information on the Moxie architecture, see
 * http://moxielogic.org/wiki
 */

#include "exec/exec-all.h"
#include "disas/disas.h"

/* This is the state at translation time. */
typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc, saved_pc;
    uint32_t opcode;
    /* Routine used to access memory */
    int memidx;
    int bstate;
    int singlestep_enabled;
} DisasContext;

enum {
    BS_NONE = 0,   /* We go out of the TB without reaching a branch or an
                    * exception condition */
    BS_STOP = 1,   /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3,   /* We reached an exception condition */
};

static TCGv cpu_pc;
static TCGv cpu_gregs[16];
static TCGv_ptr cpu_env;
static TCGv cc_a, cc_b;

#include "exec/gen-icount.h"

#define REG(x) (cpu_gregs[x])
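
/* REG() maps a 4-bit Moxie register number onto its TCG global: index 0 is
   $fp, index 1 is $sp, and indices 2..15 are $r0..$r13 (see the gregnames
   table in moxie_translate_init() below).  */
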
/* Extract the signed 10-bit offset from a 16-bit branch
   instruction. */
static int extract_branch_offset(int opcode)
{
    return (((signed short)((opcode & ((1 << 10) - 1)) << 6)) >> 6) << 1;
}
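
/* A worked example of the extraction above: if the low ten bits of the
   opcode are 0x200 (sign bit set), shifting left by 6 within a signed short
   and arithmetically back right by 6 sign-extends the field to -512, and the
   final << 1 scales it to a byte offset of -1024, since branch offsets are
   encoded in units of 16-bit instruction words.  */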

void cpu_dump_state(CPUArchState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;

    cpu_fprintf(f, "pc=0x%08x\n", env->pc);
    cpu_fprintf(f, "$fp=0x%08x $sp=0x%08x $r0=0x%08x $r1=0x%08x\n",
                env->gregs[0], env->gregs[1], env->gregs[2], env->gregs[3]);
    for (i = 4; i < 16; i += 4) {
        cpu_fprintf(f, "$r%d=0x%08x $r%d=0x%08x $r%d=0x%08x $r%d=0x%08x\n",
                    i - 2, env->gregs[i], i - 1, env->gregs[i + 1],
                    i, env->gregs[i + 2], i + 1, env->gregs[i + 3]);
    }
    for (i = 4; i < 16; i += 4) {
        cpu_fprintf(f, "sr%d=0x%08x sr%d=0x%08x sr%d=0x%08x sr%d=0x%08x\n",
                    i, env->sregs[i], i + 1, env->sregs[i + 1],
                    i + 2, env->sregs[i + 2], i + 3, env->sregs[i + 3]);
    }
}

void moxie_translate_init(void)
{
    int i;
    static const char * const gregnames[16] = {
        "$fp", "$sp", "$r0", "$r1",
        "$r2", "$r3", "$r4", "$r5",
        "$r6", "$r7", "$r8", "$r9",
        "$r10", "$r11", "$r12", "$r13"
    };

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUMoxieState, pc), "$pc");
    for (i = 0; i < 16; i++) {
        cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUMoxieState, gregs[i]),
                                              gregnames[i]);
    }
    cc_a = tcg_global_mem_new_i32(TCG_AREG0,
                                  offsetof(CPUMoxieState, cc_a), "cc_a");
    cc_b = tcg_global_mem_new_i32(TCG_AREG0,
                                  offsetof(CPUMoxieState, cc_b), "cc_b");
}
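
/* Emit code to transfer control to 'dest'.  When the destination lies on the
   same guest page as this TB and we are not single-stepping, the jump is
   emitted with tcg_gen_goto_tb()/tcg_gen_exit_tb() so the blocks can be
   chained directly; otherwise we simply store the new pc and exit the TB.  */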
static inline void gen_goto_tb(CPUMoxieState *env, DisasContext *ctx,
                               int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = ctx->tb;

    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
        !ctx->singlestep_enabled) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (ctx->singlestep_enabled) {
            gen_helper_debug(cpu_env);
        }
        tcg_gen_exit_tb(0);
    }
}

static int decode_opc(MoxieCPU *cpu, DisasContext *ctx)
{
    CPUMoxieState *env = &cpu->env;

    /* Local cache for the instruction opcode. */
    int opcode;
    /* Set the default instruction length. */
    int length = 2;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(ctx->pc);
    }

    /* Examine the 16-bit opcode. */
    opcode = ctx->opcode;

    /* Decode instruction. */
    if (opcode & (1 << 15)) {
        if (opcode & (1 << 14)) {
            /* This is a Form 3 instruction. */
            int inst = (opcode >> 10 & 0xf);
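
            /* Form 3 (conditional branches): bits 15..14 are 11, bits 13..10
               select the branch condition, and bits 9..0 hold the signed
               offset decoded by extract_branch_offset().  */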

#define BRANCH(cond)                                                         \
    do {                                                                     \
        int l1 = gen_new_label();                                            \
        tcg_gen_brcond_i32(cond, cc_a, cc_b, l1);                            \
        gen_goto_tb(env, ctx, 1, ctx->pc+2);                                 \
        gen_set_label(l1);                                                   \
        gen_goto_tb(env, ctx, 0, extract_branch_offset(opcode) + ctx->pc+2); \
        ctx->bstate = BS_BRANCH;                                             \
    } while (0)
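
            /* BRANCH emits a conditional exit from the TB: if cond holds for
               the values latched in cc_a/cc_b by a preceding cmp, control
               jumps to the branch target; otherwise it falls through to the
               next instruction at ctx->pc + 2.  Both paths leave the TB via
               gen_goto_tb().  */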
            case 0x04: /* bltu */
                BRANCH(TCG_COND_LTU);
            case 0x05: /* bgtu */
                BRANCH(TCG_COND_GTU);
            case 0x08: /* bgeu */
                BRANCH(TCG_COND_GEU);
            case 0x09: /* bleu */
                BRANCH(TCG_COND_LEU);

                TCGv temp = tcg_temp_new_i32();
                tcg_gen_movi_i32(cpu_pc, ctx->pc);
                tcg_gen_movi_i32(temp, MOXIE_EX_BAD);
                gen_helper_raise_exception(cpu_env, temp);
                tcg_temp_free_i32(temp);

            /* This is a Form 2 instruction. */
            int inst = (opcode >> 12 & 0x3);
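
            /* Form 2: bits 15..14 are 10, bits 13..12 select the operation,
               bits 11..8 name register a, and bits 7..0 carry an 8-bit
               immediate or a special-register index for the sregs accesses
               below.  */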

                int a = (opcode >> 8) & 0xf;
                unsigned int v = (opcode & 0xff);
                tcg_gen_addi_i32(REG(a), REG(a), v);

                int a = (opcode >> 8) & 0xf;
                unsigned int v = (opcode & 0xff);
                tcg_gen_subi_i32(REG(a), REG(a), v);

                int a = (opcode >> 8) & 0xf;
                unsigned v = (opcode & 0xff);
                tcg_gen_ld_i32(REG(a), cpu_env,
                               offsetof(CPUMoxieState, sregs[v]));

                int a = (opcode >> 8) & 0xf;
                unsigned v = (opcode & 0xff);
                tcg_gen_st_i32(REG(a), cpu_env,
                               offsetof(CPUMoxieState, sregs[v]));

                TCGv temp = tcg_temp_new_i32();
                tcg_gen_movi_i32(cpu_pc, ctx->pc);
                tcg_gen_movi_i32(temp, MOXIE_EX_BAD);
                gen_helper_raise_exception(cpu_env, temp);
                tcg_temp_free_i32(temp);

        /* This is a Form 1 instruction. */
        int inst = opcode >> 8;
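
        /* Form 1: bit 15 is 0, the high byte is the opcode, and the low byte
           holds two 4-bit register fields, a in bits 7..4 and b in bits 3..0.
           Variants that take a 32-bit operand fetch it from the instruction
           stream with cpu_ldl_code(env, ctx->pc + 2).  */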

        case 0x01: /* ldi.l (immediate) */
            int reg = (opcode >> 4) & 0xf;
            int val = cpu_ldl_code(env, ctx->pc+2);
            tcg_gen_movi_i32(REG(reg), val);

        case 0x02: /* mov (register-to-register) */
            int dest = (opcode >> 4) & 0xf;
            int src = opcode & 0xf;
            tcg_gen_mov_i32(REG(dest), REG(src));

        case 0x03: /* jsra */
            TCGv t1 = tcg_temp_new_i32();
            TCGv t2 = tcg_temp_new_i32();
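
            /* The return address is ctx->pc + 6: two bytes of opcode plus the
               four-byte absolute target that is fetched below with
               cpu_ldl_code(env, ctx->pc + 2).  */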
            tcg_gen_movi_i32(t1, ctx->pc + 6);

            /* Make space for the static chain and return address. */
            tcg_gen_subi_i32(t2, REG(1), 8);
            tcg_gen_mov_i32(REG(1), t2);
            tcg_gen_qemu_st32(t1, REG(1), ctx->memidx);

            /* Push the current frame pointer. */
            tcg_gen_subi_i32(t2, REG(1), 4);
            tcg_gen_mov_i32(REG(1), t2);
            tcg_gen_qemu_st32(REG(0), REG(1), ctx->memidx);

            /* Set the pc and $fp. */
            tcg_gen_mov_i32(REG(0), REG(1));
            gen_goto_tb(env, ctx, 0, cpu_ldl_code(env, ctx->pc+2));

            tcg_temp_free_i32(t1);
            tcg_temp_free_i32(t2);

            ctx->bstate = BS_BRANCH;

            TCGv t1 = tcg_temp_new_i32();

            /* The new $sp is the old $fp. */
            tcg_gen_mov_i32(REG(1), REG(0));

            /* Pop the frame pointer. */
            tcg_gen_qemu_ld32u(REG(0), REG(1), ctx->memidx);
            tcg_gen_addi_i32(t1, REG(1), 4);
            tcg_gen_mov_i32(REG(1), t1);

            /* Pop the return address and skip over the static chain. */
            tcg_gen_qemu_ld32u(cpu_pc, REG(1), ctx->memidx);
            tcg_gen_addi_i32(t1, REG(1), 8);
            tcg_gen_mov_i32(REG(1), t1);

            tcg_temp_free_i32(t1);

            ctx->bstate = BS_BRANCH;

        case 0x05: /* add.l */
            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            tcg_gen_add_i32(REG(a), REG(a), REG(b));

        case 0x06: /* push */
            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            TCGv t1 = tcg_temp_new_i32();
            tcg_gen_subi_i32(t1, REG(a), 4);
            tcg_gen_mov_i32(REG(a), t1);
            tcg_gen_qemu_st32(REG(b), REG(a), ctx->memidx);
            tcg_temp_free_i32(t1);

            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;
            TCGv t1 = tcg_temp_new_i32();

            tcg_gen_qemu_ld32u(REG(b), REG(a), ctx->memidx);
            tcg_gen_addi_i32(t1, REG(a), 4);
            tcg_gen_mov_i32(REG(a), t1);
            tcg_temp_free_i32(t1);

        case 0x08: /* lda.l */
            int reg = (opcode >> 4) & 0xf;

            TCGv ptr = tcg_temp_new_i32();
            tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
            tcg_gen_qemu_ld32u(REG(reg), ptr, ctx->memidx);
            tcg_temp_free_i32(ptr);

        case 0x09: /* sta.l */
            int val = (opcode >> 4) & 0xf;

            TCGv ptr = tcg_temp_new_i32();
            tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
            tcg_gen_qemu_st32(REG(val), ptr, ctx->memidx);
            tcg_temp_free_i32(ptr);

        case 0x0a: /* ld.l (register indirect) */
            int src = opcode & 0xf;
            int dest = (opcode >> 4) & 0xf;

            tcg_gen_qemu_ld32u(REG(dest), REG(src), ctx->memidx);

        case 0x0b: /* st.l */
            int dest = (opcode >> 4) & 0xf;
            int val = opcode & 0xf;

            tcg_gen_qemu_st32(REG(val), REG(dest), ctx->memidx);

        case 0x0c: /* ldo.l */
            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            TCGv t1 = tcg_temp_new_i32();
            TCGv t2 = tcg_temp_new_i32();
            tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2));
            tcg_gen_qemu_ld32u(t2, t1, ctx->memidx);
            tcg_gen_mov_i32(REG(a), t2);

            tcg_temp_free_i32(t1);
            tcg_temp_free_i32(t2);

        case 0x0d: /* sto.l */
            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            TCGv t1 = tcg_temp_new_i32();
            TCGv t2 = tcg_temp_new_i32();
            tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2));
            tcg_gen_qemu_st32(REG(b), t1, ctx->memidx);

            tcg_temp_free_i32(t1);
            tcg_temp_free_i32(t2);

            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            tcg_gen_mov_i32(cc_a, REG(a));
            tcg_gen_mov_i32(cc_b, REG(b));

            int fnreg = (opcode >> 4) & 0xf;

            /* Load the stack pointer into T0. */
            TCGv t1 = tcg_temp_new_i32();
            TCGv t2 = tcg_temp_new_i32();
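
            /* Unlike jsra, jsr takes its target from a register and carries
               no 32-bit operand, so the return address is just ctx->pc + 2.  */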
            tcg_gen_movi_i32(t1, ctx->pc+2);

            /* Make space for the static chain and return address. */
            tcg_gen_subi_i32(t2, REG(1), 8);
            tcg_gen_mov_i32(REG(1), t2);
            tcg_gen_qemu_st32(t1, REG(1), ctx->memidx);

            /* Push the current frame pointer. */
            tcg_gen_subi_i32(t2, REG(1), 4);
            tcg_gen_mov_i32(REG(1), t2);
            tcg_gen_qemu_st32(REG(0), REG(1), ctx->memidx);

            /* Set the pc and $fp. */
            tcg_gen_mov_i32(REG(0), REG(1));
            tcg_gen_mov_i32(cpu_pc, REG(fnreg));
            tcg_temp_free_i32(t1);
            tcg_temp_free_i32(t2);

            ctx->bstate = BS_BRANCH;

        case 0x1a: /* jmpa */
            tcg_gen_movi_i32(cpu_pc, cpu_ldl_code(env, ctx->pc+2));

            ctx->bstate = BS_BRANCH;

        case 0x1b: /* ldi.b (immediate) */
            int reg = (opcode >> 4) & 0xf;
            int val = cpu_ldl_code(env, ctx->pc+2);
            tcg_gen_movi_i32(REG(reg), val);

        case 0x1c: /* ld.b (register indirect) */
            int src = opcode & 0xf;
            int dest = (opcode >> 4) & 0xf;

            tcg_gen_qemu_ld8u(REG(dest), REG(src), ctx->memidx);

        case 0x1d: /* lda.b */
            int reg = (opcode >> 4) & 0xf;

            TCGv ptr = tcg_temp_new_i32();
            tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
            tcg_gen_qemu_ld8u(REG(reg), ptr, ctx->memidx);
            tcg_temp_free_i32(ptr);

        case 0x1e: /* st.b */
            int dest = (opcode >> 4) & 0xf;
            int val = opcode & 0xf;

            tcg_gen_qemu_st8(REG(val), REG(dest), ctx->memidx);

        case 0x1f: /* sta.b */
            int val = (opcode >> 4) & 0xf;

            TCGv ptr = tcg_temp_new_i32();
            tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
            tcg_gen_qemu_st8(REG(val), ptr, ctx->memidx);
            tcg_temp_free_i32(ptr);

        case 0x20: /* ldi.s (immediate) */
            int reg = (opcode >> 4) & 0xf;
            int val = cpu_ldl_code(env, ctx->pc+2);
            tcg_gen_movi_i32(REG(reg), val);

        case 0x21: /* ld.s (register indirect) */
            int src = opcode & 0xf;
            int dest = (opcode >> 4) & 0xf;

            tcg_gen_qemu_ld16u(REG(dest), REG(src), ctx->memidx);

        case 0x22: /* lda.s */
            int reg = (opcode >> 4) & 0xf;

            TCGv ptr = tcg_temp_new_i32();
            tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
            tcg_gen_qemu_ld16u(REG(reg), ptr, ctx->memidx);
            tcg_temp_free_i32(ptr);

        case 0x23: /* st.s */
            int dest = (opcode >> 4) & 0xf;
            int val = opcode & 0xf;

            tcg_gen_qemu_st16(REG(val), REG(dest), ctx->memidx);

        case 0x24: /* sta.s */
            int val = (opcode >> 4) & 0xf;

            TCGv ptr = tcg_temp_new_i32();
            tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
            tcg_gen_qemu_st16(REG(val), ptr, ctx->memidx);
            tcg_temp_free_i32(ptr);

            int reg = (opcode >> 4) & 0xf;
            tcg_gen_mov_i32(cpu_pc, REG(reg));

            ctx->bstate = BS_BRANCH;

            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            tcg_gen_and_i32(REG(a), REG(a), REG(b));

        case 0x27: /* lshr */
            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;
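
            /* Mask the shift count to 5 bits here (and likewise for ashl and
               ashr below): TCG leaves 32-bit shifts undefined for counts of
               32 or more.  */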
            TCGv sv = tcg_temp_new_i32();
            tcg_gen_andi_i32(sv, REG(b), 0x1f);
            tcg_gen_shr_i32(REG(a), REG(a), sv);
            tcg_temp_free_i32(sv);

        case 0x28: /* ashl */
            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            TCGv sv = tcg_temp_new_i32();
            tcg_gen_andi_i32(sv, REG(b), 0x1f);
            tcg_gen_shl_i32(REG(a), REG(a), sv);
            tcg_temp_free_i32(sv);

        case 0x29: /* sub.l */
            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            tcg_gen_sub_i32(REG(a), REG(a), REG(b));

            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            tcg_gen_neg_i32(REG(a), REG(b));

            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            tcg_gen_or_i32(REG(a), REG(a), REG(b));

            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            tcg_gen_not_i32(REG(a), REG(b));

        case 0x2d: /* ashr */
            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            TCGv sv = tcg_temp_new_i32();
            tcg_gen_andi_i32(sv, REG(b), 0x1f);
            tcg_gen_sar_i32(REG(a), REG(a), sv);
            tcg_temp_free_i32(sv);

            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            tcg_gen_xor_i32(REG(a), REG(a), REG(b));

        case 0x2f: /* mul.l */
            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            tcg_gen_mul_i32(REG(a), REG(a), REG(b));

            int val = cpu_ldl_code(env, ctx->pc+2);

            TCGv temp = tcg_temp_new_i32();
            tcg_gen_movi_i32(temp, val);
            tcg_gen_st_i32(temp, cpu_env,
                           offsetof(CPUMoxieState, sregs[3]));
            tcg_gen_movi_i32(cpu_pc, ctx->pc);
            tcg_gen_movi_i32(temp, MOXIE_EX_SWI);
            gen_helper_raise_exception(cpu_env, temp);
            tcg_temp_free_i32(temp);

        case 0x31: /* div.l */
            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;
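
            /* Synchronize cpu_pc before calling the helper, which may raise a
               runtime exception (e.g. a divide error); udiv.l below does the
               same.  */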
            tcg_gen_movi_i32(cpu_pc, ctx->pc);
            gen_helper_div(REG(a), cpu_env, REG(a), REG(b));

        case 0x32: /* udiv.l */
            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;
            tcg_gen_movi_i32(cpu_pc, ctx->pc);
            gen_helper_udiv(REG(a), cpu_env, REG(a), REG(b));

        case 0x33: /* mod.l */
            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;
            tcg_gen_rem_i32(REG(a), REG(a), REG(b));

        case 0x34: /* umod.l */
            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;
            tcg_gen_remu_i32(REG(a), REG(a), REG(b));

            TCGv temp = tcg_temp_new_i32();
            tcg_gen_movi_i32(cpu_pc, ctx->pc);
            tcg_gen_movi_i32(temp, MOXIE_EX_BREAK);
            gen_helper_raise_exception(cpu_env, temp);
            tcg_temp_free_i32(temp);

        case 0x36: /* ldo.b */
            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            TCGv t1 = tcg_temp_new_i32();
            TCGv t2 = tcg_temp_new_i32();
            tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2));
            tcg_gen_qemu_ld8u(t2, t1, ctx->memidx);
            tcg_gen_mov_i32(REG(a), t2);

            tcg_temp_free_i32(t1);
            tcg_temp_free_i32(t2);

        case 0x37: /* sto.b */
            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            TCGv t1 = tcg_temp_new_i32();
            TCGv t2 = tcg_temp_new_i32();
            tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2));
            tcg_gen_qemu_st8(REG(b), t1, ctx->memidx);

            tcg_temp_free_i32(t1);
            tcg_temp_free_i32(t2);

        case 0x38: /* ldo.s */
            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            TCGv t1 = tcg_temp_new_i32();
            TCGv t2 = tcg_temp_new_i32();
            tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2));
            tcg_gen_qemu_ld16u(t2, t1, ctx->memidx);
            tcg_gen_mov_i32(REG(a), t2);

            tcg_temp_free_i32(t1);
            tcg_temp_free_i32(t2);

        case 0x39: /* sto.s */
            int a = (opcode >> 4) & 0xf;
            int b = opcode & 0xf;

            TCGv t1 = tcg_temp_new_i32();
            TCGv t2 = tcg_temp_new_i32();
            tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2));
            tcg_gen_qemu_st16(REG(b), t1, ctx->memidx);
            tcg_temp_free_i32(t1);
            tcg_temp_free_i32(t2);

            TCGv temp = tcg_temp_new_i32();
            tcg_gen_movi_i32(cpu_pc, ctx->pc);
            tcg_gen_movi_i32(temp, MOXIE_EX_BAD);
            gen_helper_raise_exception(cpu_env, temp);
            tcg_temp_free_i32(temp);

/* generate intermediate code for basic block 'tb'. */
static inline void
gen_intermediate_code_internal(MoxieCPU *cpu, TranslationBlock *tb,
                               bool search_pc)
{
    target_ulong pc_start;
    uint16_t *gen_opc_end;
    CPUMoxieState *env = &cpu->env;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    ctx.singlestep_enabled = 0;
    ctx.bstate = BS_NONE;

        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (ctx.pc == bp->pc) {
                    tcg_gen_movi_i32(cpu_pc, ctx.pc);
                    gen_helper_debug(cpu_env);
                    ctx.bstate = BS_EXCP;
                    goto done_generating;
                }
            }
        }

        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        tcg_ctx.gen_opc_pc[lj] = ctx.pc;
        tcg_ctx.gen_opc_instr_start[lj] = 1;
        tcg_ctx.gen_opc_icount[lj] = num_insns;

        ctx.opcode = cpu_lduw_code(env, ctx.pc);
        ctx.pc += decode_opc(cpu, &ctx);

        if (env->singlestep_enabled) {

        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) {

    } while (ctx.bstate == BS_NONE && tcg_ctx.gen_opc_ptr < gen_opc_end);

    if (env->singlestep_enabled) {
        tcg_gen_movi_tl(cpu_pc, ctx.pc);
        gen_helper_debug(cpu_env);

    switch (ctx.bstate) {
        gen_goto_tb(env, &ctx, 0, ctx.pc);

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

    j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        tcg_ctx.gen_opc_instr_start[lj++] = 0;

    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

void gen_intermediate_code(CPUMoxieState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(moxie_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc(CPUMoxieState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(moxie_env_get_cpu(env), tb, true);
}
void restore_state_to_opc(CPUMoxieState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}