4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "qemu/osdep.h"
24 #include "disas/disas.h"
25 #include "exec/exec-all.h"
27 #include "exec/cpu_ldst.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
32 #include "trace-tcg.h"
36 typedef struct DisasContext {
37 struct TranslationBlock *tb;
40 uint32_t tbflags; /* should stay unmodified during the TB translation */
41 uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
47 int singlestep_enabled;
52 #if defined(CONFIG_USER_ONLY)
53 #define IS_USER(ctx) 1
55 #define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
59 BS_NONE = 0, /* We go out of the TB without reaching a branch or an
62 BS_STOP = 1, /* We want to stop translation for any reason */
63 BS_BRANCH = 2, /* We reached a branch condition */
64 BS_EXCP = 3, /* We reached an exception condition */
67 /* global register indexes */
68 static TCGv cpu_gregs[32];
69 static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
70 static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
71 static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
72 static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
73 static TCGv cpu_fregs[32];
75 /* internal register indexes */
76 static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;
78 #include "exec/gen-icount.h"
80 void sh4_translate_init(void)
83 static const char * const gregnames[24] = {
84 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
85 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
86 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
87 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
88 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
90 static const char * const fregnames[32] = {
91 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
92 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
93 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
94 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
95 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
96 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
97 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
98 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
101 for (i = 0; i < 24; i++) {
102 cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
103 offsetof(CPUSH4State, gregs[i]),
106 memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));
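/* Entries 24..31 alias R8..R15, so the bank-selection XOR used by
   REG()/ALTREG() works uniformly: for R8-R15 the "other bank" is the
   same register. */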
108 cpu_pc = tcg_global_mem_new_i32(cpu_env,
109 offsetof(CPUSH4State, pc), "PC");
110 cpu_sr = tcg_global_mem_new_i32(cpu_env,
111 offsetof(CPUSH4State, sr), "SR");
112 cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
113 offsetof(CPUSH4State, sr_m), "SR_M");
114 cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
115 offsetof(CPUSH4State, sr_q), "SR_Q");
116 cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
117 offsetof(CPUSH4State, sr_t), "SR_T");
118 cpu_ssr = tcg_global_mem_new_i32(cpu_env,
119 offsetof(CPUSH4State, ssr), "SSR");
120 cpu_spc = tcg_global_mem_new_i32(cpu_env,
121 offsetof(CPUSH4State, spc), "SPC");
122 cpu_gbr = tcg_global_mem_new_i32(cpu_env,
123 offsetof(CPUSH4State, gbr), "GBR");
124 cpu_vbr = tcg_global_mem_new_i32(cpu_env,
125 offsetof(CPUSH4State, vbr), "VBR");
126 cpu_sgr = tcg_global_mem_new_i32(cpu_env,
127 offsetof(CPUSH4State, sgr), "SGR");
128 cpu_dbr = tcg_global_mem_new_i32(cpu_env,
129 offsetof(CPUSH4State, dbr), "DBR");
130 cpu_mach = tcg_global_mem_new_i32(cpu_env,
131 offsetof(CPUSH4State, mach), "MACH");
132 cpu_macl = tcg_global_mem_new_i32(cpu_env,
133 offsetof(CPUSH4State, macl), "MACL");
134 cpu_pr = tcg_global_mem_new_i32(cpu_env,
135 offsetof(CPUSH4State, pr), "PR");
136 cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
137 offsetof(CPUSH4State, fpscr), "FPSCR");
138 cpu_fpul = tcg_global_mem_new_i32(cpu_env,
139 offsetof(CPUSH4State, fpul), "FPUL");
141 cpu_flags = tcg_global_mem_new_i32(cpu_env,
142 offsetof(CPUSH4State, flags), "_flags_");
143 cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
144 offsetof(CPUSH4State, delayed_pc),
146 cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
147 offsetof(CPUSH4State,
150 cpu_ldst = tcg_global_mem_new_i32(cpu_env,
151 offsetof(CPUSH4State, ldst), "_ldst_");
153 for (i = 0; i < 32; i++)
154 cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
155 offsetof(CPUSH4State, fregs[i]),
159 void superh_cpu_dump_state(CPUState *cs, FILE *f,
160 fprintf_function cpu_fprintf, int flags)
162 SuperHCPU *cpu = SUPERH_CPU(cs);
163 CPUSH4State *env = &cpu->env;
165 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
166 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
167 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
168 env->spc, env->ssr, env->gbr, env->vbr);
169 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
170 env->sgr, env->dbr, env->delayed_pc, env->fpul);
171 for (i = 0; i < 24; i += 4) {
172 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
173 i, env->gregs[i], i + 1, env->gregs[i + 1],
174 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
176 if (env->flags & DELAY_SLOT) {
177 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
179 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
180 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
182 } else if (env->flags & DELAY_SLOT_RTE) {
183 cpu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
188 static void gen_read_sr(TCGv dst)
190 TCGv t0 = tcg_temp_new();
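/* The Q, M and T bits of SR live in their own TCG globals (cpu_sr_q,
   cpu_sr_m, cpu_sr_t); cpu_sr holds the remaining bits.  Reassemble the
   architectural value as cpu_sr | Q << SR_Q | M << SR_M | T << SR_T. */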
191 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
192 tcg_gen_or_i32(dst, cpu_sr, t0);
193 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
194 tcg_gen_or_i32(dst, dst, t0);
195 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
196 tcg_gen_or_i32(dst, dst, t0);
197 tcg_temp_free_i32(t0);
200 static void gen_write_sr(TCGv src)
202 tcg_gen_andi_i32(cpu_sr, src,
203 ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
204 tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
205 tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
206 tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
209 static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
212 tcg_gen_movi_i32(cpu_pc, ctx->pc);
214 if (ctx->delayed_pc != (uint32_t) -1) {
215 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
217 if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
218 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
222 static inline bool use_exit_tb(DisasContext *ctx)
224 return (ctx->tbflags & GUSA_EXCLUSIVE) != 0;
227 static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
229 /* Use a direct jump if the destination is in the same page and single-stepping is not enabled */
230 if (unlikely(ctx->singlestep_enabled || use_exit_tb(ctx))) {
233 #ifndef CONFIG_USER_ONLY
234 return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
240 static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
242 if (use_goto_tb(ctx, dest)) {
244 tcg_gen_movi_i32(cpu_pc, dest);
245 tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
247 tcg_gen_movi_i32(cpu_pc, dest);
248 if (ctx->singlestep_enabled) {
249 gen_helper_debug(cpu_env);
250 } else if (use_exit_tb(ctx)) {
253 tcg_gen_lookup_and_goto_ptr();
258 static void gen_jump(DisasContext * ctx)
260 if (ctx->delayed_pc == -1) {
261 /* Target is not statically known; it necessarily comes from a
262 delayed jump, since immediate jumps are conditional jumps */
263 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
264 tcg_gen_discard_i32(cpu_delayed_pc);
265 if (ctx->singlestep_enabled) {
266 gen_helper_debug(cpu_env);
267 } else if (use_exit_tb(ctx)) {
270 tcg_gen_lookup_and_goto_ptr();
273 gen_goto_tb(ctx, 0, ctx->delayed_pc);
277 /* Immediate conditional jump (bt or bf) */
278 static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
281 TCGLabel *l1 = gen_new_label();
282 TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;
284 if (ctx->tbflags & GUSA_EXCLUSIVE) {
285 /* When in an exclusive region, we must continue to the end.
286 Therefore, exit the region on a taken branch, but otherwise
287 fall through to the next instruction. */
288 tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
289 tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
290 /* Note that this won't actually use a goto_tb opcode because we
291 disallow it in use_goto_tb, but it handles exit + singlestep. */
292 gen_goto_tb(ctx, 0, dest);
297 gen_save_cpu_state(ctx, false);
298 tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
299 gen_goto_tb(ctx, 0, dest);
301 gen_goto_tb(ctx, 1, ctx->pc + 2);
302 ctx->bstate = BS_BRANCH;
305 /* Delayed conditional jump (bt or bf) */
306 static void gen_delayed_conditional_jump(DisasContext * ctx)
308 TCGLabel *l1 = gen_new_label();
309 TCGv ds = tcg_temp_new();
311 tcg_gen_mov_i32(ds, cpu_delayed_cond);
312 tcg_gen_discard_i32(cpu_delayed_cond);
314 if (ctx->tbflags & GUSA_EXCLUSIVE) {
315 /* When in an exclusive region, we must continue to the end.
316 Therefore, exit the region on a taken branch, but otherwise
317 fall through to the next instruction. */
318 tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);
320 /* Leave the gUSA region. */
321 tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
328 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
329 gen_goto_tb(ctx, 1, ctx->pc + 2);
334 static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
336 /* We have already signaled illegal instruction for odd Dr. */
337 tcg_debug_assert((reg & 1) == 0);
339 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
342 static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
344 /* We have already signaled illegal instruction for odd Dr. */
345 tcg_debug_assert((reg & 1) == 0);
347 tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
350 #define B3_0 (ctx->opcode & 0xf)
351 #define B6_4 ((ctx->opcode >> 4) & 0x7)
352 #define B7_4 ((ctx->opcode >> 4) & 0xf)
353 #define B7_0 (ctx->opcode & 0xff)
354 #define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
355 #define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
356 (ctx->opcode & 0xfff))
357 #define B11_8 ((ctx->opcode >> 8) & 0xf)
358 #define B15_12 ((ctx->opcode >> 12) & 0xf)
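/* The Bn_m macros above extract bits n..m of the 16-bit opcode; the
   B7_0s and B11_0s variants are the sign-extended 8- and 12-bit
   displacement fields. */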
360 #define REG(x) cpu_gregs[(x) ^ ctx->gbank]
361 #define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
362 #define FREG(x) cpu_fregs[(x) ^ ctx->fbank]
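/* REG(x) names the Rn currently visible to the program: XOR-ing with
   ctx->gbank (0 or 0x10) picks the active R0-R7 bank (for R8-R15 the XOR
   lands in the aliased entries set up in sh4_translate_init, so it is a
   no-op).  ALTREG(x) is the same register in the other bank, and FREG(x)
   picks the FR/XF floating point bank via ctx->fbank. */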
364 #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
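/* For 64-bit fmov (FPSCR.SZ set), bit 0 of the register field selects the
   XD bank rather than an odd register.  XHACK moves that bank bit up to
   bit 4 and keeps the even register number, yielding an index usable with
   FREG(); e.g. XHACK(5) = 0x14, i.e. XD4. */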
366 #define CHECK_NOT_DELAY_SLOT \
367 if (ctx->envflags & DELAY_SLOT_MASK) { \
368 goto do_illegal_slot; \
371 #define CHECK_PRIVILEGED \
372 if (IS_USER(ctx)) { \
376 #define CHECK_FPU_ENABLED \
377 if (ctx->tbflags & (1u << SR_FD)) { \
378 goto do_fpu_disabled; \
381 #define CHECK_FPSCR_PR_0 \
382 if (ctx->tbflags & FPSCR_PR) { \
386 #define CHECK_FPSCR_PR_1 \
387 if (!(ctx->tbflags & FPSCR_PR)) { \
392 if (!(ctx->features & SH_FEATURE_SH4A)) { \
396 static void _decode_opc(DisasContext * ctx)
398 /* This code tries to make movca.l emulation sufficiently
399 accurate for Linux purposes. This instruction writes
400 memory, and prior to that, always allocates a cache line.
401 It is used in two contexts:
402 - in memcpy, where data is copied in blocks, the first write
403 to a block uses movca.l for performance.
404 - in arch/sh/mm/cache-sh4.c, a movca.l + ocbi combination is used
405 to flush the cache. Here, the data written by movca.l is never
406 written to memory, and the data written is just bogus.
408 To simulate this, when we emulate movca.l we store the value to memory,
409 but we also remember the previous content. If we then see ocbi, we check
410 whether movca.l was previously done for that address. If so, the write should
411 not have hit memory, so we restore the previous content.
412 When we see an instruction that is neither movca.l
413 nor ocbi, the previous content is discarded.
415 To optimize, we only try to flush stores when we're at the start of
416 TB, or if we already saw movca.l in this TB and did not flush stores
420 int opcode = ctx->opcode & 0xf0ff;
421 if (opcode != 0x0093 /* ocbi */
422 && opcode != 0x00c3 /* movca.l */)
424 gen_helper_discard_movcal_backup(cpu_env);
430 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
433 switch (ctx->opcode) {
434 case 0x0019: /* div0u */
435 tcg_gen_movi_i32(cpu_sr_m, 0);
436 tcg_gen_movi_i32(cpu_sr_q, 0);
437 tcg_gen_movi_i32(cpu_sr_t, 0);
439 case 0x000b: /* rts */
441 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
442 ctx->envflags |= DELAY_SLOT;
443 ctx->delayed_pc = (uint32_t) - 1;
445 case 0x0028: /* clrmac */
446 tcg_gen_movi_i32(cpu_mach, 0);
447 tcg_gen_movi_i32(cpu_macl, 0);
449 case 0x0048: /* clrs */
450 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
452 case 0x0008: /* clrt */
453 tcg_gen_movi_i32(cpu_sr_t, 0);
455 case 0x0038: /* ldtlb */
457 gen_helper_ldtlb(cpu_env);
459 case 0x002b: /* rte */
462 gen_write_sr(cpu_ssr);
463 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
464 ctx->envflags |= DELAY_SLOT_RTE;
465 ctx->delayed_pc = (uint32_t) - 1;
466 ctx->bstate = BS_STOP;
468 case 0x0058: /* sets */
469 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
471 case 0x0018: /* sett */
472 tcg_gen_movi_i32(cpu_sr_t, 1);
474 case 0xfbfd: /* frchg */
476 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
477 ctx->bstate = BS_STOP;
479 case 0xf3fd: /* fschg */
481 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
482 ctx->bstate = BS_STOP;
484 case 0xf7fd: /* fpchg */
486 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
487 ctx->bstate = BS_STOP;
489 case 0x0009: /* nop */
491 case 0x001b: /* sleep */
493 tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
494 gen_helper_sleep(cpu_env);
498 switch (ctx->opcode & 0xf000) {
499 case 0x1000: /* mov.l Rm,@(disp,Rn) */
501 TCGv addr = tcg_temp_new();
502 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
503 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
507 case 0x5000: /* mov.l @(disp,Rm),Rn */
509 TCGv addr = tcg_temp_new();
510 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
511 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
515 case 0xe000: /* mov #imm,Rn */
516 #ifdef CONFIG_USER_ONLY
517 /* Detect the start of a gUSA region. If so, update envflags
518 and end the TB. This will allow us to see the end of the
519 region (stored in R0) in the next TB. */
520 if (B11_8 == 15 && B7_0s < 0 && (tb_cflags(ctx->tb) & CF_PARALLEL)) {
521 ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
522 ctx->bstate = BS_STOP;
525 tcg_gen_movi_i32(REG(B11_8), B7_0s);
527 case 0x9000: /* mov.w @(disp,PC),Rn */
529 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
530 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
534 case 0xd000: /* mov.l @(disp,PC),Rn */
536 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
537 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
541 case 0x7000: /* add #imm,Rn */
542 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
544 case 0xa000: /* bra disp */
546 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
547 ctx->envflags |= DELAY_SLOT;
549 case 0xb000: /* bsr disp */
551 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
552 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
553 ctx->envflags |= DELAY_SLOT;
557 switch (ctx->opcode & 0xf00f) {
558 case 0x6003: /* mov Rm,Rn */
559 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
561 case 0x2000: /* mov.b Rm,@Rn */
562 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
564 case 0x2001: /* mov.w Rm,@Rn */
565 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
567 case 0x2002: /* mov.l Rm,@Rn */
568 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
570 case 0x6000: /* mov.b @Rm,Rn */
571 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
573 case 0x6001: /* mov.w @Rm,Rn */
574 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
576 case 0x6002: /* mov.l @Rm,Rn */
577 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
579 case 0x2004: /* mov.b Rm,@-Rn */
581 TCGv addr = tcg_temp_new();
582 tcg_gen_subi_i32(addr, REG(B11_8), 1);
583 /* might cause re-execution */
584 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
585 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
589 case 0x2005: /* mov.w Rm,@-Rn */
591 TCGv addr = tcg_temp_new();
592 tcg_gen_subi_i32(addr, REG(B11_8), 2);
593 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
594 tcg_gen_mov_i32(REG(B11_8), addr);
598 case 0x2006: /* mov.l Rm,@-Rn */
600 TCGv addr = tcg_temp_new();
601 tcg_gen_subi_i32(addr, REG(B11_8), 4);
602 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
603 tcg_gen_mov_i32(REG(B11_8), addr);
606 case 0x6004: /* mov.b @Rm+,Rn */
607 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
609 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
611 case 0x6005: /* mov.w @Rm+,Rn */
612 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
614 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
616 case 0x6006: /* mov.l @Rm+,Rn */
617 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
619 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
621 case 0x0004: /* mov.b Rm,@(R0,Rn) */
623 TCGv addr = tcg_temp_new();
624 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
625 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
629 case 0x0005: /* mov.w Rm,@(R0,Rn) */
631 TCGv addr = tcg_temp_new();
632 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
633 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
637 case 0x0006: /* mov.l Rm,@(R0,Rn) */
639 TCGv addr = tcg_temp_new();
640 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
641 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
645 case 0x000c: /* mov.b @(R0,Rm),Rn */
647 TCGv addr = tcg_temp_new();
648 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
649 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
653 case 0x000d: /* mov.w @(R0,Rm),Rn */
655 TCGv addr = tcg_temp_new();
656 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
657 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
661 case 0x000e: /* mov.l @(R0,Rm),Rn */
663 TCGv addr = tcg_temp_new();
664 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
665 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
669 case 0x6008: /* swap.b Rm,Rn */
671 TCGv low = tcg_temp_new();
672 tcg_gen_ext16u_i32(low, REG(B7_4));
673 tcg_gen_bswap16_i32(low, low);
674 tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
678 case 0x6009: /* swap.w Rm,Rn */
679 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
681 case 0x200d: /* xtrct Rm,Rn */
684 high = tcg_temp_new();
685 tcg_gen_shli_i32(high, REG(B7_4), 16);
686 low = tcg_temp_new();
687 tcg_gen_shri_i32(low, REG(B11_8), 16);
688 tcg_gen_or_i32(REG(B11_8), high, low);
693 case 0x300c: /* add Rm,Rn */
694 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
696 case 0x300e: /* addc Rm,Rn */
699 t0 = tcg_const_tl(0);
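/* addc: Rn = Rn + Rm + T, with T receiving the carry out.  Compute
   Rm + T into t1 with add2 (carry into cpu_sr_t), then add t1 to Rn,
   again letting add2 accumulate the carry in cpu_sr_t. */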
701 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
702 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
703 REG(B11_8), t0, t1, cpu_sr_t);
708 case 0x300f: /* addv Rm,Rn */
712 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
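/* Signed overflow occurs iff the operands have the same sign and the
   sign of the result differs, i.e. T = ((t0 ^ Rn) & ~(Rm ^ Rn)) >> 31. */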
714 tcg_gen_xor_i32(t1, t0, REG(B11_8));
716 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
717 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
719 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
721 tcg_gen_mov_i32(REG(B11_8), t0);
725 case 0x2009: /* and Rm,Rn */
726 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
728 case 0x3000: /* cmp/eq Rm,Rn */
729 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
731 case 0x3003: /* cmp/ge Rm,Rn */
732 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
734 case 0x3007: /* cmp/gt Rm,Rn */
735 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
737 case 0x3006: /* cmp/hi Rm,Rn */
738 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
740 case 0x3002: /* cmp/hs Rm,Rn */
741 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
743 case 0x200c: /* cmp/str Rm,Rn */
745 TCGv cmp1 = tcg_temp_new();
746 TCGv cmp2 = tcg_temp_new();
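/* cmp/str sets T if any byte of Rm equals the corresponding byte of Rn.
   cmp2 = Rm ^ Rn has a zero byte exactly where the bytes match, and
   (cmp2 - 0x01010101) & ~cmp2 & 0x80808080 is non-zero iff cmp2 contains
   a zero byte. */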
747 tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
748 tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
749 tcg_gen_andc_i32(cmp1, cmp1, cmp2);
750 tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
751 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
756 case 0x2007: /* div0s Rm,Rn */
757 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
758 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
759 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
761 case 0x3004: /* div1 Rm,Rn */
763 TCGv t0 = tcg_temp_new();
764 TCGv t1 = tcg_temp_new();
765 TCGv t2 = tcg_temp_new();
766 TCGv zero = tcg_const_i32(0);
768 /* shift left arg1, saving the bit being pushed out and inserting
770 tcg_gen_shri_i32(t0, REG(B11_8), 31);
771 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
772 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
774 /* Add or subtract arg0 from arg1 depending on whether Q == M. To avoid
775 using 64-bit temps, we compute arg0's high part from q ^ m, so
776 that it is 0x00000000 when adding the value or 0xffffffff when
778 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
779 tcg_gen_subi_i32(t1, t1, 1);
780 tcg_gen_neg_i32(t2, REG(B7_4));
781 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
782 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
784 /* compute T and Q depending on carry */
785 tcg_gen_andi_i32(t1, t1, 1);
786 tcg_gen_xor_i32(t1, t1, t0);
787 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
788 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
796 case 0x300d: /* dmuls.l Rm,Rn */
797 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
799 case 0x3005: /* dmulu.l Rm,Rn */
800 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
802 case 0x600e: /* exts.b Rm,Rn */
803 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
805 case 0x600f: /* exts.w Rm,Rn */
806 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
808 case 0x600c: /* extu.b Rm,Rn */
809 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
811 case 0x600d: /* extu.w Rm,Rn */
812 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
814 case 0x000f: /* mac.l @Rm+,@Rn+ */
817 arg0 = tcg_temp_new();
818 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
819 arg1 = tcg_temp_new();
820 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
821 gen_helper_macl(cpu_env, arg0, arg1);
824 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
825 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
828 case 0x400f: /* mac.w @Rm+,@Rn+ */
831 arg0 = tcg_temp_new();
832 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
833 arg1 = tcg_temp_new();
834 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
835 gen_helper_macw(cpu_env, arg0, arg1);
838 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
839 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
842 case 0x0007: /* mul.l Rm,Rn */
843 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
845 case 0x200f: /* muls.w Rm,Rn */
848 arg0 = tcg_temp_new();
849 tcg_gen_ext16s_i32(arg0, REG(B7_4));
850 arg1 = tcg_temp_new();
851 tcg_gen_ext16s_i32(arg1, REG(B11_8));
852 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
857 case 0x200e: /* mulu.w Rm,Rn */
860 arg0 = tcg_temp_new();
861 tcg_gen_ext16u_i32(arg0, REG(B7_4));
862 arg1 = tcg_temp_new();
863 tcg_gen_ext16u_i32(arg1, REG(B11_8));
864 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
869 case 0x600b: /* neg Rm,Rn */
870 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
872 case 0x600a: /* negc Rm,Rn */
874 TCGv t0 = tcg_const_i32(0);
875 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
876 REG(B7_4), t0, cpu_sr_t, t0);
877 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
878 t0, t0, REG(B11_8), cpu_sr_t);
879 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
883 case 0x6007: /* not Rm,Rn */
884 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
886 case 0x200b: /* or Rm,Rn */
887 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
889 case 0x400c: /* shad Rm,Rn */
891 TCGv t0 = tcg_temp_new();
892 TCGv t1 = tcg_temp_new();
893 TCGv t2 = tcg_temp_new();
895 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
897 /* positive case: shift to the left */
898 tcg_gen_shl_i32(t1, REG(B11_8), t0);
900 /* negative case: shift to the right in two steps to
901 correctly handle the -32 case */
902 tcg_gen_xori_i32(t0, t0, 0x1f);
903 tcg_gen_sar_i32(t2, REG(B11_8), t0);
904 tcg_gen_sari_i32(t2, t2, 1);
906 /* select between the two cases */
907 tcg_gen_movi_i32(t0, 0);
908 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
915 case 0x400d: /* shld Rm,Rn */
917 TCGv t0 = tcg_temp_new();
918 TCGv t1 = tcg_temp_new();
919 TCGv t2 = tcg_temp_new();
921 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
923 /* positive case: shift to the left */
924 tcg_gen_shl_i32(t1, REG(B11_8), t0);
926 /* negative case: shift to the right in two steps to
927 correctly handle the -32 case */
928 tcg_gen_xori_i32(t0, t0, 0x1f);
929 tcg_gen_shr_i32(t2, REG(B11_8), t0);
930 tcg_gen_shri_i32(t2, t2, 1);
932 /* select between the two cases */
933 tcg_gen_movi_i32(t0, 0);
934 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
941 case 0x3008: /* sub Rm,Rn */
942 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
944 case 0x300a: /* subc Rm,Rn */
947 t0 = tcg_const_tl(0);
949 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
950 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
951 REG(B11_8), t0, t1, cpu_sr_t);
952 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
957 case 0x300b: /* subv Rm,Rn */
961 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
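/* Signed overflow of Rn - Rm occurs iff the operands differ in sign and
   the result's sign differs from Rn, i.e. T = ((t0 ^ Rn) & (Rn ^ Rm)) >> 31. */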
963 tcg_gen_xor_i32(t1, t0, REG(B11_8));
965 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
966 tcg_gen_and_i32(t1, t1, t2);
968 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
970 tcg_gen_mov_i32(REG(B11_8), t0);
974 case 0x2008: /* tst Rm,Rn */
976 TCGv val = tcg_temp_new();
977 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
978 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
982 case 0x200a: /* xor Rm,Rn */
983 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
985 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
987 if (ctx->tbflags & FPSCR_SZ) {
988 int xsrc = XHACK(B7_4);
989 int xdst = XHACK(B11_8);
990 tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
991 tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
993 tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
996 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
998 if (ctx->tbflags & FPSCR_SZ) {
999 TCGv_i64 fp = tcg_temp_new_i64();
1000 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1001 tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEQ);
1002 tcg_temp_free_i64(fp);
1004 tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
1007 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1009 if (ctx->tbflags & FPSCR_SZ) {
1010 TCGv_i64 fp = tcg_temp_new_i64();
1011 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
1012 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1013 tcg_temp_free_i64(fp);
1015 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
1018 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1020 if (ctx->tbflags & FPSCR_SZ) {
1021 TCGv_i64 fp = tcg_temp_new_i64();
1022 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
1023 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1024 tcg_temp_free_i64(fp);
1025 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1027 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
1028 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1031 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1034 TCGv addr = tcg_temp_new_i32();
1035 if (ctx->tbflags & FPSCR_SZ) {
1036 TCGv_i64 fp = tcg_temp_new_i64();
1037 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1038 tcg_gen_subi_i32(addr, REG(B11_8), 8);
1039 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
1040 tcg_temp_free_i64(fp);
1042 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1043 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
1045 tcg_gen_mov_i32(REG(B11_8), addr);
1046 tcg_temp_free(addr);
1049 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1052 TCGv addr = tcg_temp_new_i32();
1053 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1054 if (ctx->tbflags & FPSCR_SZ) {
1055 TCGv_i64 fp = tcg_temp_new_i64();
1056 tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEQ);
1057 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1058 tcg_temp_free_i64(fp);
1060 tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL);
1062 tcg_temp_free(addr);
1065 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1068 TCGv addr = tcg_temp_new();
1069 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1070 if (ctx->tbflags & FPSCR_SZ) {
1071 TCGv_i64 fp = tcg_temp_new_i64();
1072 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1073 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
1074 tcg_temp_free_i64(fp);
1076 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
1078 tcg_temp_free(addr);
1081 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1082 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1083 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1084 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1085 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1086 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1089 if (ctx->tbflags & FPSCR_PR) {
1092 if (ctx->opcode & 0x0110) {
1095 fp0 = tcg_temp_new_i64();
1096 fp1 = tcg_temp_new_i64();
1097 gen_load_fpr64(ctx, fp0, B11_8);
1098 gen_load_fpr64(ctx, fp1, B7_4);
1099 switch (ctx->opcode & 0xf00f) {
1100 case 0xf000: /* fadd Rm,Rn */
1101 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1103 case 0xf001: /* fsub Rm,Rn */
1104 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1106 case 0xf002: /* fmul Rm,Rn */
1107 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1109 case 0xf003: /* fdiv Rm,Rn */
1110 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1112 case 0xf004: /* fcmp/eq Rm,Rn */
1113 gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
1115 case 0xf005: /* fcmp/gt Rm,Rn */
1116 gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
1119 gen_store_fpr64(ctx, fp0, B11_8);
1120 tcg_temp_free_i64(fp0);
1121 tcg_temp_free_i64(fp1);
1123 switch (ctx->opcode & 0xf00f) {
1124 case 0xf000: /* fadd Rm,Rn */
1125 gen_helper_fadd_FT(FREG(B11_8), cpu_env,
1126 FREG(B11_8), FREG(B7_4));
1128 case 0xf001: /* fsub Rm,Rn */
1129 gen_helper_fsub_FT(FREG(B11_8), cpu_env,
1130 FREG(B11_8), FREG(B7_4));
1132 case 0xf002: /* fmul Rm,Rn */
1133 gen_helper_fmul_FT(FREG(B11_8), cpu_env,
1134 FREG(B11_8), FREG(B7_4));
1136 case 0xf003: /* fdiv Rm,Rn */
1137 gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
1138 FREG(B11_8), FREG(B7_4));
1140 case 0xf004: /* fcmp/eq Rm,Rn */
1141 gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
1142 FREG(B11_8), FREG(B7_4));
1144 case 0xf005: /* fcmp/gt Rm,Rn */
1145 gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
1146 FREG(B11_8), FREG(B7_4));
1152 case 0xf00e: /* fmac FR0,RM,Rn */
1155 gen_helper_fmac_FT(FREG(B11_8), cpu_env,
1156 FREG(0), FREG(B7_4), FREG(B11_8));
1160 switch (ctx->opcode & 0xff00) {
1161 case 0xc900: /* and #imm,R0 */
1162 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1164 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1167 addr = tcg_temp_new();
1168 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1169 val = tcg_temp_new();
1170 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1171 tcg_gen_andi_i32(val, val, B7_0);
1172 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1174 tcg_temp_free(addr);
1177 case 0x8b00: /* bf label */
1178 CHECK_NOT_DELAY_SLOT
1179 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, false);
1181 case 0x8f00: /* bf/s label */
1182 CHECK_NOT_DELAY_SLOT
1183 tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
1184 ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
1185 ctx->envflags |= DELAY_SLOT_CONDITIONAL;
1187 case 0x8900: /* bt label */
1188 CHECK_NOT_DELAY_SLOT
1189 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, true);
1191 case 0x8d00: /* bt/s label */
1192 CHECK_NOT_DELAY_SLOT
1193 tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
1194 ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
1195 ctx->envflags |= DELAY_SLOT_CONDITIONAL;
1197 case 0x8800: /* cmp/eq #imm,R0 */
1198 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
1200 case 0xc400: /* mov.b @(disp,GBR),R0 */
1202 TCGv addr = tcg_temp_new();
1203 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1204 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1205 tcg_temp_free(addr);
1208 case 0xc500: /* mov.w @(disp,GBR),R0 */
1210 TCGv addr = tcg_temp_new();
1211 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1212 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1213 tcg_temp_free(addr);
1216 case 0xc600: /* mov.l @(disp,GBR),R0 */
1218 TCGv addr = tcg_temp_new();
1219 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1220 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
1221 tcg_temp_free(addr);
1224 case 0xc000: /* mov.b R0,@(disp,GBR) */
1226 TCGv addr = tcg_temp_new();
1227 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1228 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1229 tcg_temp_free(addr);
1232 case 0xc100: /* mov.w R0,@(disp,GBR) */
1234 TCGv addr = tcg_temp_new();
1235 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1236 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1237 tcg_temp_free(addr);
1240 case 0xc200: /* mov.l R0,@(disp,GBR) */
1242 TCGv addr = tcg_temp_new();
1243 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1244 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
1245 tcg_temp_free(addr);
1248 case 0x8000: /* mov.b R0,@(disp,Rn) */
1250 TCGv addr = tcg_temp_new();
1251 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1252 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1253 tcg_temp_free(addr);
1256 case 0x8100: /* mov.w R0,@(disp,Rn) */
1258 TCGv addr = tcg_temp_new();
1259 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1260 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1261 tcg_temp_free(addr);
1264 case 0x8400: /* mov.b @(disp,Rn),R0 */
1266 TCGv addr = tcg_temp_new();
1267 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1268 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1269 tcg_temp_free(addr);
1272 case 0x8500: /* mov.w @(disp,Rn),R0 */
1274 TCGv addr = tcg_temp_new();
1275 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1276 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1277 tcg_temp_free(addr);
1280 case 0xc700: /* mova @(disp,PC),R0 */
1281 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1283 case 0xcb00: /* or #imm,R0 */
1284 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1286 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1289 addr = tcg_temp_new();
1290 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1291 val = tcg_temp_new();
1292 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1293 tcg_gen_ori_i32(val, val, B7_0);
1294 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1296 tcg_temp_free(addr);
1299 case 0xc300: /* trapa #imm */
1302 CHECK_NOT_DELAY_SLOT
1303 gen_save_cpu_state(ctx, true);
1304 imm = tcg_const_i32(B7_0);
1305 gen_helper_trapa(cpu_env, imm);
1307 ctx->bstate = BS_EXCP;
1310 case 0xc800: /* tst #imm,R0 */
1312 TCGv val = tcg_temp_new();
1313 tcg_gen_andi_i32(val, REG(0), B7_0);
1314 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1318 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1320 TCGv val = tcg_temp_new();
1321 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1322 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1323 tcg_gen_andi_i32(val, val, B7_0);
1324 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1328 case 0xca00: /* xor #imm,R0 */
1329 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1331 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1334 addr = tcg_temp_new();
1335 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1336 val = tcg_temp_new();
1337 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1338 tcg_gen_xori_i32(val, val, B7_0);
1339 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1341 tcg_temp_free(addr);
1346 switch (ctx->opcode & 0xf08f) {
1347 case 0x408e: /* ldc Rm,Rn_BANK */
1349 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1351 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1353 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
1354 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1356 case 0x0082: /* stc Rm_BANK,Rn */
1358 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1360 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1363 TCGv addr = tcg_temp_new();
1364 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1365 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
1366 tcg_gen_mov_i32(REG(B11_8), addr);
1367 tcg_temp_free(addr);
1372 switch (ctx->opcode & 0xf0ff) {
1373 case 0x0023: /* braf Rn */
1374 CHECK_NOT_DELAY_SLOT
1375 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1376 ctx->envflags |= DELAY_SLOT;
1377 ctx->delayed_pc = (uint32_t) - 1;
1379 case 0x0003: /* bsrf Rn */
1380 CHECK_NOT_DELAY_SLOT
1381 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1382 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1383 ctx->envflags |= DELAY_SLOT;
1384 ctx->delayed_pc = (uint32_t) - 1;
1386 case 0x4015: /* cmp/pl Rn */
1387 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
1389 case 0x4011: /* cmp/pz Rn */
1390 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
1392 case 0x4010: /* dt Rn */
1393 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1394 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
1396 case 0x402b: /* jmp @Rn */
1397 CHECK_NOT_DELAY_SLOT
1398 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1399 ctx->envflags |= DELAY_SLOT;
1400 ctx->delayed_pc = (uint32_t) - 1;
1402 case 0x400b: /* jsr @Rn */
1403 CHECK_NOT_DELAY_SLOT
1404 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1405 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1406 ctx->envflags |= DELAY_SLOT;
1407 ctx->delayed_pc = (uint32_t) - 1;
1409 case 0x400e: /* ldc Rm,SR */
1412 TCGv val = tcg_temp_new();
1413 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1416 ctx->bstate = BS_STOP;
1419 case 0x4007: /* ldc.l @Rm+,SR */
1422 TCGv val = tcg_temp_new();
1423 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
1424 tcg_gen_andi_i32(val, val, 0x700083f3);
1427 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1428 ctx->bstate = BS_STOP;
1431 case 0x0002: /* stc SR,Rn */
1433 gen_read_sr(REG(B11_8));
1435 case 0x4003: /* stc SR,@-Rn */
1438 TCGv addr = tcg_temp_new();
1439 TCGv val = tcg_temp_new();
1440 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1442 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1443 tcg_gen_mov_i32(REG(B11_8), addr);
1445 tcg_temp_free(addr);
1448 #define LD(reg,ldnum,ldpnum,prechk) \
1451 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1455 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1456 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1458 #define ST(reg,stnum,stpnum,prechk) \
1461 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1466 TCGv addr = tcg_temp_new(); \
1467 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1468 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1469 tcg_gen_mov_i32(REG(B11_8), addr); \
1470 tcg_temp_free(addr); \
1473 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1474 LD(reg,ldnum,ldpnum,prechk) \
1475 ST(reg,stnum,stpnum,prechk)
1476 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1477 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1478 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1479 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1480 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1481 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
1482 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1483 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1484 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1485 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1486 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1487 case 0x406a: /* lds Rm,FPSCR */
1489 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1490 ctx->bstate = BS_STOP;
1492 case 0x4066: /* lds.l @Rm+,FPSCR */
1495 TCGv addr = tcg_temp_new();
1496 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
1497 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1498 gen_helper_ld_fpscr(cpu_env, addr);
1499 tcg_temp_free(addr);
1500 ctx->bstate = BS_STOP;
1503 case 0x006a: /* sts FPSCR,Rn */
1505 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1507 case 0x4062: /* sts FPSCR,@-Rn */
1511 val = tcg_temp_new();
1512 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1513 addr = tcg_temp_new();
1514 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1515 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1516 tcg_gen_mov_i32(REG(B11_8), addr);
1517 tcg_temp_free(addr);
1521 case 0x00c3: /* movca.l R0,@Rm */
1523 TCGv val = tcg_temp_new();
1524 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
1525 gen_helper_movcal(cpu_env, REG(B11_8), val);
1526 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1528 ctx->has_movcal = 1;
1530 case 0x40a9: /* movua.l @Rm,R0 */
1532 /* Load non-boundary-aligned data */
1533 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1534 MO_TEUL | MO_UNALN);
1537 case 0x40e9: /* movua.l @Rm+,R0 */
1539 /* Load non-boundary-aligned data */
1540 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1541 MO_TEUL | MO_UNALN);
1542 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1545 case 0x0029: /* movt Rn */
1546 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1551 If (T == 1) R0 -> (Rn)
1556 TCGLabel *label = gen_new_label();
1557 tcg_gen_mov_i32(cpu_sr_t, cpu_ldst);
1558 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1559 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1560 gen_set_label(label);
1561 tcg_gen_movi_i32(cpu_ldst, 0);
1568 When interrupt/exception
1572 tcg_gen_movi_i32(cpu_ldst, 0);
1573 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1574 tcg_gen_movi_i32(cpu_ldst, 1);
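/* movli.l/movco.l form a load-linked/store-conditional style pair emulated
   with the cpu_ldst flag: it is cleared before the load (so a faulting load
   leaves it unset), set once the load completes, and movco.l above performs
   its store (and sets T) only while it is still set, clearing it afterwards. */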
1576 case 0x0093: /* ocbi @Rn */
1578 gen_helper_ocbi(cpu_env, REG(B11_8));
1581 case 0x00a3: /* ocbp @Rn */
1582 case 0x00b3: /* ocbwb @Rn */
1583 /* These instructions are supposed to do nothing in case of
1584 a cache miss. Given that we only partially emulate caches
1585 it is safe to simply ignore them. */
1587 case 0x0083: /* pref @Rn */
1589 case 0x00d3: /* prefi @Rn */
1592 case 0x00e3: /* icbi @Rn */
1595 case 0x00ab: /* synco */
1597 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1600 case 0x4024: /* rotcl Rn */
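/* Rotate Rn left through the T bit: the old T becomes the new LSB and the
   old MSB becomes the new T. */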
1602 TCGv tmp = tcg_temp_new();
1603 tcg_gen_mov_i32(tmp, cpu_sr_t);
1604 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1605 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1606 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1610 case 0x4025: /* rotcr Rn */
1612 TCGv tmp = tcg_temp_new();
1613 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1614 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1615 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1616 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1620 case 0x4004: /* rotl Rn */
1621 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1622 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1624 case 0x4005: /* rotr Rn */
1625 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1626 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1628 case 0x4000: /* shll Rn */
1629 case 0x4020: /* shal Rn */
1630 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1631 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1633 case 0x4021: /* shar Rn */
1634 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1635 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1637 case 0x4001: /* shlr Rn */
1638 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1639 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1641 case 0x4008: /* shll2 Rn */
1642 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1644 case 0x4018: /* shll8 Rn */
1645 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1647 case 0x4028: /* shll16 Rn */
1648 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1650 case 0x4009: /* shlr2 Rn */
1651 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1653 case 0x4019: /* shlr8 Rn */
1654 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1656 case 0x4029: /* shlr16 Rn */
1657 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1659 case 0x401b: /* tas.b @Rn */
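/* Atomically read the byte at @Rn, set T if it was zero, and set its MSB,
   implemented as an atomic fetch-or with 0x80. */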
1661 TCGv val = tcg_const_i32(0x80);
1662 tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
1663 ctx->memidx, MO_UB);
1664 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1668 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1670 tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
1672 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1674 tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
1676 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1678 if (ctx->tbflags & FPSCR_PR) {
1680 if (ctx->opcode & 0x0100) {
1683 fp = tcg_temp_new_i64();
1684 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1685 gen_store_fpr64(ctx, fp, B11_8);
1686 tcg_temp_free_i64(fp);
1689 gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
1692 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1694 if (ctx->tbflags & FPSCR_PR) {
1696 if (ctx->opcode & 0x0100) {
1699 fp = tcg_temp_new_i64();
1700 gen_load_fpr64(ctx, fp, B11_8);
1701 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1702 tcg_temp_free_i64(fp);
1705 gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
1708 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1710 tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
1712 case 0xf05d: /* fabs FRn/DRn - FPCSR: Nothing */
1714 tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
1716 case 0xf06d: /* fsqrt FRn */
1718 if (ctx->tbflags & FPSCR_PR) {
1719 if (ctx->opcode & 0x0100) {
1722 TCGv_i64 fp = tcg_temp_new_i64();
1723 gen_load_fpr64(ctx, fp, B11_8);
1724 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1725 gen_store_fpr64(ctx, fp, B11_8);
1726 tcg_temp_free_i64(fp);
1728 gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
1731 case 0xf07d: /* fsrra FRn */
1734 gen_helper_fsrra_FT(FREG(B11_8), cpu_env, FREG(B11_8));
1736 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1739 tcg_gen_movi_i32(FREG(B11_8), 0);
1741 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1744 tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1746 case 0xf0ad: /* fcnvsd FPUL,DRn */
1749 TCGv_i64 fp = tcg_temp_new_i64();
1750 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1751 gen_store_fpr64(ctx, fp, B11_8);
1752 tcg_temp_free_i64(fp);
1755 case 0xf0bd: /* fcnvds DRn,FPUL */
1758 TCGv_i64 fp = tcg_temp_new_i64();
1759 gen_load_fpr64(ctx, fp, B11_8);
1760 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1761 tcg_temp_free_i64(fp);
1764 case 0xf0ed: /* fipr FVm,FVn */
1768 TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3);
1769 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
1770 gen_helper_fipr(cpu_env, m, n);
1776 case 0xf0fd: /* ftrv XMTRX,FVn */
1780 if ((ctx->opcode & 0x0300) != 0x0100) {
1783 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
1784 gen_helper_ftrv(cpu_env, n);
1791 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1792 ctx->opcode, ctx->pc);
1796 if (ctx->envflags & DELAY_SLOT_MASK) {
1798 gen_save_cpu_state(ctx, true);
1799 gen_helper_raise_slot_illegal_instruction(cpu_env);
1801 gen_save_cpu_state(ctx, true);
1802 gen_helper_raise_illegal_instruction(cpu_env);
1804 ctx->bstate = BS_EXCP;
1808 gen_save_cpu_state(ctx, true);
1809 if (ctx->envflags & DELAY_SLOT_MASK) {
1810 gen_helper_raise_slot_fpu_disable(cpu_env);
1812 gen_helper_raise_fpu_disable(cpu_env);
1814 ctx->bstate = BS_EXCP;
1818 static void decode_opc(DisasContext * ctx)
1820 uint32_t old_flags = ctx->envflags;
1824 if (old_flags & DELAY_SLOT_MASK) {
1825 /* go out of the delay slot */
1826 ctx->envflags &= ~DELAY_SLOT_MASK;
1828 /* When in an exclusive region, we must continue to the end
1829 for conditional branches. */
1830 if (ctx->tbflags & GUSA_EXCLUSIVE
1831 && old_flags & DELAY_SLOT_CONDITIONAL) {
1832 gen_delayed_conditional_jump(ctx);
1835 /* Otherwise this is probably an invalid gUSA region.
1836 Drop the GUSA bits so the next TB doesn't see them. */
1837 ctx->envflags &= ~GUSA_MASK;
1839 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
1840 ctx->bstate = BS_BRANCH;
1841 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1842 gen_delayed_conditional_jump(ctx);
1849 #ifdef CONFIG_USER_ONLY
1850 /* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
1851 Upon an interrupt, a real kernel would simply notice magic values in
1852 the registers and reset the PC to the start of the sequence.
1854 For QEMU, we cannot do this in quite the same way. Instead, we notice
1855 the normal start of such a sequence (mov #-x,r15). While we can handle
1856 any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1857 sequences and transform them into atomic operations as seen by the host.
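
   As a rough illustration (the register choices and immediates here are
   illustrative, not an exact listing), an atomic increment under gUSA
   looks something like:

       mova   1f, r0      ! r0 = address of the end of the region
       mov    r15, r1     ! save the stack pointer
       mov    #-6, r15    ! r15 = -(region length): gUSA is now active
    0: mov.l  @r2, r3     ! load the old value
       add    #1, r3      ! operate on it
       mov.l  r3, @r2     ! store the new value
    1: mov    r1, r15     ! restore r15: region over

   decode_gusa() below recognizes the load/operate/store core of such a
   region and turns it into a single host atomic operation. */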
1859 static int decode_gusa(DisasContext *ctx, CPUSH4State *env, int *pmax_insns)
1862 int ld_adr, ld_dst, ld_mop;
1863 int op_dst, op_src, op_opc;
1864 int mv_src, mt_dst, st_src, st_mop;
1867 uint32_t pc = ctx->pc;
1868 uint32_t pc_end = ctx->tb->cs_base;
1869 int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8);
1870 int max_insns = (pc_end - pc) / 2;
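/* pc_end (from tb->cs_base) is the address of the end of the region;
   backup is the negative byte offset the guest loaded into r15, so a
   well-formed region satisfies pc == pc_end + backup. */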
1873 if (pc != pc_end + backup || max_insns < 2) {
1874 /* This is a malformed gUSA region. Don't do anything special,
1875 since the interpreter is likely to get confused. */
1876 ctx->envflags &= ~GUSA_MASK;
1880 if (ctx->tbflags & GUSA_EXCLUSIVE) {
1881 /* Regardless of single-stepping or the end of the page,
1882 we must complete execution of the gUSA region while
1883 holding the exclusive lock. */
1884 *pmax_insns = max_insns;
1888 /* The state machine below will consume only a few insns.
1889 If there are more than that in a region, fail now. */
1890 if (max_insns > ARRAY_SIZE(insns)) {
1894 /* Read all of the insns for the region. */
1895 for (i = 0; i < max_insns; ++i) {
1896 insns[i] = cpu_lduw_code(env, pc + i * 2);
1899 ld_adr = ld_dst = ld_mop = -1;
1901 op_dst = op_src = op_opc = -1;
1903 st_src = st_mop = -1;
1904 TCGV_UNUSED(op_arg);
1908 do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
1911 * Expect a load to begin the region.
1914 switch (ctx->opcode & 0xf00f) {
1915 case 0x6000: /* mov.b @Rm,Rn */
1918 case 0x6001: /* mov.w @Rm,Rn */
1921 case 0x6002: /* mov.l @Rm,Rn */
1929 if (ld_adr == ld_dst) {
1932 /* Unless we see a mov, any two-operand operation must use ld_dst. */
1936 * Expect an optional register move.
1939 switch (ctx->opcode & 0xf00f) {
1940 case 0x6003: /* mov Rm,Rn */
1941 /* Here we want to recognize ld_dst being saved for later consumption,
1942 or for another input register being copied so that ld_dst need not
1943 be clobbered during the operation. */
1946 if (op_dst == ld_dst) {
1947 /* Overwriting the load output. */
1950 if (mv_src != ld_dst) {
1951 /* Copying a new input; constrain op_src to match the load. */
1957 /* Put back and re-examine as operation. */
1962 * Expect the operation.
1965 switch (ctx->opcode & 0xf00f) {
1966 case 0x300c: /* add Rm,Rn */
1967 op_opc = INDEX_op_add_i32;
1969 case 0x2009: /* and Rm,Rn */
1970 op_opc = INDEX_op_and_i32;
1972 case 0x200a: /* xor Rm,Rn */
1973 op_opc = INDEX_op_xor_i32;
1975 case 0x200b: /* or Rm,Rn */
1976 op_opc = INDEX_op_or_i32;
1978 /* The operation register should be as expected, and the
1979 other input cannot depend on the load. */
1980 if (op_dst != B11_8) {
1984 /* Unconstrained input. */
1986 } else if (op_src == B7_4) {
1987 /* Constrained input matched load. All operations are
1988 commutative; "swap" them by "moving" the load output
1989 to the (implicit) first argument and the move source
1990 to the (explicit) second argument. */
1995 op_arg = REG(op_src);
1998 case 0x6007: /* not Rm,Rn */
1999 if (ld_dst != B7_4 || mv_src >= 0) {
2003 op_opc = INDEX_op_xor_i32;
2004 op_arg = tcg_const_i32(-1);
2007 case 0x7000 ... 0x700f: /* add #imm,Rn */
2008 if (op_dst != B11_8 || mv_src >= 0) {
2011 op_opc = INDEX_op_add_i32;
2012 op_arg = tcg_const_i32(B7_0s);
2015 case 0x3000: /* cmp/eq Rm,Rn */
2016 /* Looking for the middle of a compare-and-swap sequence,
2017 beginning with the compare. Operands can be in either order,
2018 but with only one overlapping the load. */
2019 if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
2022 op_opc = INDEX_op_setcond_i32; /* placeholder */
2023 op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
2024 op_arg = REG(op_src);
2027 switch (ctx->opcode & 0xff00) {
2028 case 0x8b00: /* bf label */
2029 case 0x8f00: /* bf/s label */
2030 if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
2033 if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
2036 /* We're looking to unconditionally modify Rn with the
2037 result of the comparison, within the delay slot of
2038 the branch. This is used by older gcc. */
2040 if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
2052 case 0x2008: /* tst Rm,Rn */
2053 /* Looking for a compare-and-swap against zero. */
2054 if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
2057 op_opc = INDEX_op_setcond_i32;
2058 op_arg = tcg_const_i32(0);
2061 if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
2062 || pc + (i + 1 + B7_0s) * 2 != pc_end) {
2068 /* Put back and re-examine as store. */
2075 /* The store must be the last insn. */
2076 if (i != max_insns - 1) {
2080 switch (ctx->opcode & 0xf00f) {
2081 case 0x2000: /* mov.b Rm,@Rn */
2084 case 0x2001: /* mov.w Rm,@Rn */
2087 case 0x2002: /* mov.l Rm,@Rn */
2093 /* The store must match the load. */
2094 if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
2102 * Emit the operation.
2104 tcg_gen_insn_start(pc, ctx->envflags);
2107 /* No operation found. Look for exchange pattern. */
2108 if (st_src == ld_dst || mv_src >= 0) {
2111 tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
2112 ctx->memidx, ld_mop);
2115 case INDEX_op_add_i32:
2116 if (op_dst != st_src) {
2119 if (op_dst == ld_dst && st_mop == MO_UL) {
2120 tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
2121 op_arg, ctx->memidx, ld_mop);
2123 tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
2124 op_arg, ctx->memidx, ld_mop);
2125 if (op_dst != ld_dst) {
2126 /* Note that mop sizes < 4 cannot use add_fetch
2127 because it won't carry into the higher bits. */
2128 tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
2133 case INDEX_op_and_i32:
2134 if (op_dst != st_src) {
2137 if (op_dst == ld_dst) {
2138 tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
2139 op_arg, ctx->memidx, ld_mop);
2141 tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
2142 op_arg, ctx->memidx, ld_mop);
2143 tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
2147 case INDEX_op_or_i32:
2148 if (op_dst != st_src) {
2151 if (op_dst == ld_dst) {
2152 tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
2153 op_arg, ctx->memidx, ld_mop);
2155 tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
2156 op_arg, ctx->memidx, ld_mop);
2157 tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
2161 case INDEX_op_xor_i32:
2162 if (op_dst != st_src) {
2165 if (op_dst == ld_dst) {
2166 tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
2167 op_arg, ctx->memidx, ld_mop);
2169 tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
2170 op_arg, ctx->memidx, ld_mop);
2171 tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
2175 case INDEX_op_setcond_i32:
2176 if (st_src == ld_dst) {
2179 tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
2180 REG(st_src), ctx->memidx, ld_mop);
2181 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
2183 tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
2188 g_assert_not_reached();
2191 /* If op_src is not a valid register, then op_arg was a constant. */
2193 tcg_temp_free_i32(op_arg);
2196 /* The entire region has been translated. */
2197 ctx->envflags &= ~GUSA_MASK;
2202 qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
2205 /* Restart with the EXCLUSIVE bit set, within a TB run via
2206 cpu_exec_step_atomic holding the exclusive lock. */
2207 tcg_gen_insn_start(pc, ctx->envflags);
2208 ctx->envflags |= GUSA_EXCLUSIVE;
2209 gen_save_cpu_state(ctx, false);
2210 gen_helper_exclusive(cpu_env);
2211 ctx->bstate = BS_EXCP;
2213 /* We're not executing an instruction, but we must report one for the
2214 purposes of accounting within the TB. We might as well report the
2215 entire region consumed via ctx->pc so that it's immediately available
2216 in the disassembly dump. */
2222 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
2224 CPUSH4State *env = cs->env_ptr;
2226 target_ulong pc_start;
2232 ctx.tbflags = (uint32_t)tb->flags;
2233 ctx.envflags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
2234 ctx.bstate = BS_NONE;
2235 ctx.memidx = (ctx.tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
2236 /* We don't know if the delayed pc came from a dynamic or static branch,
2237 so assume it is a dynamic branch. */
2238 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
2240 ctx.singlestep_enabled = cs->singlestep_enabled;
2241 ctx.features = env->features;
2242 ctx.has_movcal = (ctx.tbflags & TB_FLAG_PENDING_MOVCA);
2243 ctx.gbank = ((ctx.tbflags & (1 << SR_MD)) &&
2244 (ctx.tbflags & (1 << SR_RB))) * 0x10;
2245 ctx.fbank = ctx.tbflags & FPSCR_FR ? 0x10 : 0;
2247 max_insns = tb_cflags(tb) & CF_COUNT_MASK;
2248 if (max_insns == 0) {
2249 max_insns = CF_COUNT_MASK;
2251 max_insns = MIN(max_insns, TCG_MAX_INSNS);
2253 /* Since the ISA is fixed-width, we can bound by the number
2254 of instructions remaining on the page. */
2255 num_insns = -(ctx.pc | TARGET_PAGE_MASK) / 2;
2256 max_insns = MIN(max_insns, num_insns);
2258 /* Single stepping means just that. */
2259 if (ctx.singlestep_enabled || singlestep) {
2266 #ifdef CONFIG_USER_ONLY
2267 if (ctx.tbflags & GUSA_MASK) {
2268 num_insns = decode_gusa(&ctx, env, &max_insns);
2272 while (ctx.bstate == BS_NONE
2273 && num_insns < max_insns
2274 && !tcg_op_buf_full()) {
2275 tcg_gen_insn_start(ctx.pc, ctx.envflags);
2278 if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
2279 /* We have hit a breakpoint - make sure PC is up-to-date */
2280 gen_save_cpu_state(&ctx, true);
2281 gen_helper_debug(cpu_env);
2282 ctx.bstate = BS_EXCP;
2283 /* The address covered by the breakpoint must be included in
2284 [tb->pc, tb->pc + tb->size) in order for it to be
2285 properly cleared -- thus we increment the PC here so that
2286 the logic setting tb->size below does the right thing. */
2291 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
2295 ctx.opcode = cpu_lduw_code(env, ctx.pc);
2299 if (tb_cflags(tb) & CF_LAST_IO) {
2303 if (ctx.tbflags & GUSA_EXCLUSIVE) {
2304 /* Ending the region of exclusivity. Clear the bits. */
2305 ctx.envflags &= ~GUSA_MASK;
2308 if (cs->singlestep_enabled) {
2309 gen_save_cpu_state(&ctx, true);
2310 gen_helper_debug(cpu_env);
2312 switch (ctx.bstate) {
2314 gen_save_cpu_state(&ctx, true);
2318 gen_save_cpu_state(&ctx, false);
2319 gen_goto_tb(&ctx, 0, ctx.pc);
2329 gen_tb_end(tb, num_insns);
2331 tb->size = ctx.pc - pc_start;
2332 tb->icount = num_insns;
2335 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
2336 && qemu_log_in_addr_range(pc_start)) {
2338 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2339 log_target_disas(cs, pc_start, ctx.pc - pc_start);
2346 void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
2350 env->flags = data[1];
2351 /* Theoretically delayed_pc should also be restored. In practice the
2352 branch instruction is re-executed after exception, so the delayed
2353 branch target will be recomputed. */