4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "qemu/osdep.h"
24 #include "disas/disas.h"
25 #include "exec/exec-all.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "trace-tcg.h"
35 typedef struct DisasContext {
36 DisasContextBase base;
38 uint32_t tbflags; /* should stay unmodified during the TB translation */
39 uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
51 #if defined(CONFIG_USER_ONLY)
52 #define IS_USER(ctx) 1
54 #define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
57 /* Target-specific values for ctx->base.is_jmp. */
58 /* We want to exit back to the cpu loop for some reason.
59 Usually this is to recognize interrupts immediately. */
60 #define DISAS_STOP DISAS_TARGET_0
62 /* global register indexes */
63 static TCGv cpu_gregs[32];
64 static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
65 static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
66 static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
67 static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
68 static TCGv cpu_lock_addr, cpu_lock_value;
69 static TCGv cpu_fregs[32];
71 /* internal register indexes */
72 static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;
74 #include "exec/gen-icount.h"
76 void sh4_translate_init(void)
79 static const char * const gregnames[24] = {
80 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
81 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
82 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
83 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
84 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
86 static const char * const fregnames[32] = {
87 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
88 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
89 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
90 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
91 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
92 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
93 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
94 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
97 for (i = 0; i < 24; i++) {
98 cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
99 offsetof(CPUSH4State, gregs[i]),
102 memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));
104 cpu_pc = tcg_global_mem_new_i32(cpu_env,
105 offsetof(CPUSH4State, pc), "PC");
106 cpu_sr = tcg_global_mem_new_i32(cpu_env,
107 offsetof(CPUSH4State, sr), "SR");
108 cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
109 offsetof(CPUSH4State, sr_m), "SR_M");
110 cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
111 offsetof(CPUSH4State, sr_q), "SR_Q");
112 cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
113 offsetof(CPUSH4State, sr_t), "SR_T");
114 cpu_ssr = tcg_global_mem_new_i32(cpu_env,
115 offsetof(CPUSH4State, ssr), "SSR");
116 cpu_spc = tcg_global_mem_new_i32(cpu_env,
117 offsetof(CPUSH4State, spc), "SPC");
118 cpu_gbr = tcg_global_mem_new_i32(cpu_env,
119 offsetof(CPUSH4State, gbr), "GBR");
120 cpu_vbr = tcg_global_mem_new_i32(cpu_env,
121 offsetof(CPUSH4State, vbr), "VBR");
122 cpu_sgr = tcg_global_mem_new_i32(cpu_env,
123 offsetof(CPUSH4State, sgr), "SGR");
124 cpu_dbr = tcg_global_mem_new_i32(cpu_env,
125 offsetof(CPUSH4State, dbr), "DBR");
126 cpu_mach = tcg_global_mem_new_i32(cpu_env,
127 offsetof(CPUSH4State, mach), "MACH");
128 cpu_macl = tcg_global_mem_new_i32(cpu_env,
129 offsetof(CPUSH4State, macl), "MACL");
130 cpu_pr = tcg_global_mem_new_i32(cpu_env,
131 offsetof(CPUSH4State, pr), "PR");
132 cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
133 offsetof(CPUSH4State, fpscr), "FPSCR");
134 cpu_fpul = tcg_global_mem_new_i32(cpu_env,
135 offsetof(CPUSH4State, fpul), "FPUL");
137 cpu_flags = tcg_global_mem_new_i32(cpu_env,
138 offsetof(CPUSH4State, flags), "_flags_");
139 cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
140 offsetof(CPUSH4State, delayed_pc),
142 cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
143 offsetof(CPUSH4State,
146 cpu_lock_addr = tcg_global_mem_new_i32(cpu_env,
147 offsetof(CPUSH4State, lock_addr),
149 cpu_lock_value = tcg_global_mem_new_i32(cpu_env,
150 offsetof(CPUSH4State, lock_value),
153 for (i = 0; i < 32; i++)
154 cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
155 offsetof(CPUSH4State, fregs[i]),
159 void superh_cpu_dump_state(CPUState *cs, FILE *f,
160 fprintf_function cpu_fprintf, int flags)
162 SuperHCPU *cpu = SUPERH_CPU(cs);
163 CPUSH4State *env = &cpu->env;
165 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
166 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
167 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
168 env->spc, env->ssr, env->gbr, env->vbr);
169 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
170 env->sgr, env->dbr, env->delayed_pc, env->fpul);
171 for (i = 0; i < 24; i += 4) {
172 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
173 i, env->gregs[i], i + 1, env->gregs[i + 1],
174 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
176 if (env->flags & DELAY_SLOT) {
177 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
179 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
180 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
182 } else if (env->flags & DELAY_SLOT_RTE) {
183 cpu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
188 static void gen_read_sr(TCGv dst)
190 TCGv t0 = tcg_temp_new();
191 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
192 tcg_gen_or_i32(dst, cpu_sr, t0);
193 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
194 tcg_gen_or_i32(dst, dst, t0);
195 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
196 tcg_gen_or_i32(dst, dst, t0);
197 tcg_temp_free_i32(t0);
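/* The architectural SR is kept split: Q, M and T live in their own TCG
   globals (cpu_sr_q, cpu_sr_m, cpu_sr_t) while the remaining bits stay in
   cpu_sr. gen_read_sr above reassembles the full SR value and gen_write_sr
   below splits one back out. */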
200 static void gen_write_sr(TCGv src)
202 tcg_gen_andi_i32(cpu_sr, src,
203 ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
204 tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
205 tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
206 tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
209 static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
212 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
214 if (ctx->delayed_pc != (uint32_t) -1) {
215 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
217 if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
218 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
222 static inline bool use_exit_tb(DisasContext *ctx)
224 return (ctx->tbflags & GUSA_EXCLUSIVE) != 0;
227 static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
229 /* Use a direct jump if the destination is in the same page and single-stepping is not enabled */
230 if (unlikely(ctx->base.singlestep_enabled || use_exit_tb(ctx))) {
233 #ifndef CONFIG_USER_ONLY
234 return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
240 static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
242 if (use_goto_tb(ctx, dest)) {
244 tcg_gen_movi_i32(cpu_pc, dest);
245 tcg_gen_exit_tb((uintptr_t)ctx->base.tb + n);
247 tcg_gen_movi_i32(cpu_pc, dest);
248 if (ctx->base.singlestep_enabled) {
249 gen_helper_debug(cpu_env);
250 } else if (use_exit_tb(ctx)) {
253 tcg_gen_lookup_and_goto_ptr();
256 ctx->base.is_jmp = DISAS_NORETURN;
259 static void gen_jump(DisasContext * ctx)
261 if (ctx->delayed_pc == -1) {
262 /* Target is not statically known; it necessarily comes from a
263 delayed jump, since immediate jumps are conditional jumps */
264 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
265 tcg_gen_discard_i32(cpu_delayed_pc);
266 if (ctx->base.singlestep_enabled) {
267 gen_helper_debug(cpu_env);
268 } else if (use_exit_tb(ctx)) {
271 tcg_gen_lookup_and_goto_ptr();
273 ctx->base.is_jmp = DISAS_NORETURN;
275 gen_goto_tb(ctx, 0, ctx->delayed_pc);
279 /* Immediate conditional jump (bt or bf) */
280 static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
283 TCGLabel *l1 = gen_new_label();
284 TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;
286 if (ctx->tbflags & GUSA_EXCLUSIVE) {
287 /* When in an exclusive region, we must continue to the end.
288 Therefore, exit the region on a taken branch, but otherwise
289 fall through to the next instruction. */
290 tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
291 tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
292 /* Note that this won't actually use a goto_tb opcode because we
293 disallow it in use_goto_tb, but it handles exit + singlestep. */
294 gen_goto_tb(ctx, 0, dest);
299 gen_save_cpu_state(ctx, false);
300 tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
301 gen_goto_tb(ctx, 0, dest);
303 gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
304 ctx->base.is_jmp = DISAS_NORETURN;
307 /* Delayed conditional jump (bt or bf) */
308 static void gen_delayed_conditional_jump(DisasContext * ctx)
310 TCGLabel *l1 = gen_new_label();
311 TCGv ds = tcg_temp_new();
313 tcg_gen_mov_i32(ds, cpu_delayed_cond);
314 tcg_gen_discard_i32(cpu_delayed_cond);
316 if (ctx->tbflags & GUSA_EXCLUSIVE) {
317 /* When in an exclusive region, we must continue to the end.
318 Therefore, exit the region on a taken branch, but otherwise
319 fall through to the next instruction. */
320 tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);
322 /* Leave the gUSA region. */
323 tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
327 ctx->base.is_jmp = DISAS_NEXT;
331 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
332 gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
337 static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
339 /* We have already signaled illegal instruction for odd Dr. */
340 tcg_debug_assert((reg & 1) == 0);
342 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
345 static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
347 /* We have already signaled illegal instruction for odd Dr. */
348 tcg_debug_assert((reg & 1) == 0);
350 tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
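/* Opcode field extraction helpers: BH_L names bits H..L of the 16-bit
   instruction word; the _s variants are sign-extended (B7_0s: low byte as a
   signed immediate, B11_0s: signed 12-bit branch displacement). */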
353 #define B3_0 (ctx->opcode & 0xf)
354 #define B6_4 ((ctx->opcode >> 4) & 0x7)
355 #define B7_4 ((ctx->opcode >> 4) & 0xf)
356 #define B7_0 (ctx->opcode & 0xff)
357 #define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
358 #define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
359 (ctx->opcode & 0xfff))
360 #define B11_8 ((ctx->opcode >> 8) & 0xf)
361 #define B15_12 ((ctx->opcode >> 12) & 0xf)
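/* Register bank selection: ctx->gbank is 0x10 when SR.MD and SR.RB are both
   set, so REG() picks the currently active bank of R0-R7 and ALTREG() the
   other one; FREG() likewise applies the FPSCR.FR bank to the FP registers. */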
363 #define REG(x) cpu_gregs[(x) ^ ctx->gbank]
364 #define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
365 #define FREG(x) cpu_fregs[(x) ^ ctx->fbank]
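/* For 64-bit fmov (FPSCR.SZ set), bit 0 of a register field selects the XD
   (other-bank) registers. XHACK folds that bit into the bank-select bit and
   clears it, yielding an even index suitable for FREG(). */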
367 #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
369 #define CHECK_NOT_DELAY_SLOT \
370 if (ctx->envflags & DELAY_SLOT_MASK) { \
371 goto do_illegal_slot; \
374 #define CHECK_PRIVILEGED \
375 if (IS_USER(ctx)) { \
379 #define CHECK_FPU_ENABLED \
380 if (ctx->tbflags & (1u << SR_FD)) { \
381 goto do_fpu_disabled; \
384 #define CHECK_FPSCR_PR_0 \
385 if (ctx->tbflags & FPSCR_PR) { \
389 #define CHECK_FPSCR_PR_1 \
390 if (!(ctx->tbflags & FPSCR_PR)) { \
395 if (!(ctx->features & SH_FEATURE_SH4A)) { \
399 static void _decode_opc(DisasContext * ctx)
401 /* This code tries to make movca.l emulation sufficiently
402 accurate for Linux purposes. This instruction writes
403 memory, and prior to that, always allocates a cache line.
404 It is used in two contexts:
405 - in memcpy, where data is copied in blocks, the first write
406 to a block uses movca.l for performance.
407 - in arch/sh/mm/cache-sh4.c, the movca.l + ocbi combination is used
408 to flush the cache. Here, the data written by movca.l is never
409 written to memory, and the data written is just bogus.
411 To simulate this, we emulate movca.l by storing the value to memory,
412 but we also remember the previous content. If we see ocbi, we check
413 whether a movca.l to that address was done previously. If so, the write should
414 not have hit the memory, so we restore the previous content.
415 When we see an instruction that is neither movca.l
416 nor ocbi, the previous content is discarded.
418 To optimize, we only try to flush stores when we're at the start of a
419 TB, or if we already saw movca.l in this TB and did not flush stores yet. */
423 int opcode = ctx->opcode & 0xf0ff;
424 if (opcode != 0x0093 /* ocbi */
425 && opcode != 0x00c3 /* movca.l */)
427 gen_helper_discard_movcal_backup(cpu_env);
433 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
436 switch (ctx->opcode) {
437 case 0x0019: /* div0u */
438 tcg_gen_movi_i32(cpu_sr_m, 0);
439 tcg_gen_movi_i32(cpu_sr_q, 0);
440 tcg_gen_movi_i32(cpu_sr_t, 0);
442 case 0x000b: /* rts */
444 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
445 ctx->envflags |= DELAY_SLOT;
446 ctx->delayed_pc = (uint32_t) - 1;
448 case 0x0028: /* clrmac */
449 tcg_gen_movi_i32(cpu_mach, 0);
450 tcg_gen_movi_i32(cpu_macl, 0);
452 case 0x0048: /* clrs */
453 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
455 case 0x0008: /* clrt */
456 tcg_gen_movi_i32(cpu_sr_t, 0);
458 case 0x0038: /* ldtlb */
460 gen_helper_ldtlb(cpu_env);
462 case 0x002b: /* rte */
465 gen_write_sr(cpu_ssr);
466 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
467 ctx->envflags |= DELAY_SLOT_RTE;
468 ctx->delayed_pc = (uint32_t) - 1;
469 ctx->base.is_jmp = DISAS_STOP;
471 case 0x0058: /* sets */
472 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
474 case 0x0018: /* sett */
475 tcg_gen_movi_i32(cpu_sr_t, 1);
477 case 0xfbfd: /* frchg */
479 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
480 ctx->base.is_jmp = DISAS_STOP;
482 case 0xf3fd: /* fschg */
484 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
485 ctx->base.is_jmp = DISAS_STOP;
487 case 0xf7fd: /* fpchg */
489 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
490 ctx->base.is_jmp = DISAS_STOP;
492 case 0x0009: /* nop */
494 case 0x001b: /* sleep */
496 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
497 gen_helper_sleep(cpu_env);
501 switch (ctx->opcode & 0xf000) {
502 case 0x1000: /* mov.l Rm,@(disp,Rn) */
504 TCGv addr = tcg_temp_new();
505 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
506 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
510 case 0x5000: /* mov.l @(disp,Rm),Rn */
512 TCGv addr = tcg_temp_new();
513 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
514 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
518 case 0xe000: /* mov #imm,Rn */
519 #ifdef CONFIG_USER_ONLY
520 /* Detect the start of a gUSA region. If so, update envflags
521 and end the TB. This will allow us to see the end of the
522 region (stored in R0) in the next TB. */
523 if (B11_8 == 15 && B7_0s < 0 &&
524 (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
525 ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
526 ctx->base.is_jmp = DISAS_STOP;
529 tcg_gen_movi_i32(REG(B11_8), B7_0s);
531 case 0x9000: /* mov.w @(disp,PC),Rn */
533 TCGv addr = tcg_const_i32(ctx->base.pc_next + 4 + B7_0 * 2);
534 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
538 case 0xd000: /* mov.l @(disp,PC),Rn */
540 TCGv addr = tcg_const_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
541 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
545 case 0x7000: /* add #imm,Rn */
546 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
548 case 0xa000: /* bra disp */
550 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
551 ctx->envflags |= DELAY_SLOT;
553 case 0xb000: /* bsr disp */
555 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
556 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
557 ctx->envflags |= DELAY_SLOT;
561 switch (ctx->opcode & 0xf00f) {
562 case 0x6003: /* mov Rm,Rn */
563 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
565 case 0x2000: /* mov.b Rm,@Rn */
566 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
568 case 0x2001: /* mov.w Rm,@Rn */
569 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
571 case 0x2002: /* mov.l Rm,@Rn */
572 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
574 case 0x6000: /* mov.b @Rm,Rn */
575 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
577 case 0x6001: /* mov.w @Rm,Rn */
578 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
580 case 0x6002: /* mov.l @Rm,Rn */
581 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
583 case 0x2004: /* mov.b Rm,@-Rn */
585 TCGv addr = tcg_temp_new();
586 tcg_gen_subi_i32(addr, REG(B11_8), 1);
587 /* might cause re-execution */
588 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
589 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
593 case 0x2005: /* mov.w Rm,@-Rn */
595 TCGv addr = tcg_temp_new();
596 tcg_gen_subi_i32(addr, REG(B11_8), 2);
597 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
598 tcg_gen_mov_i32(REG(B11_8), addr);
602 case 0x2006: /* mov.l Rm,@-Rn */
604 TCGv addr = tcg_temp_new();
605 tcg_gen_subi_i32(addr, REG(B11_8), 4);
606 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
607 tcg_gen_mov_i32(REG(B11_8), addr);
611 case 0x6004: /* mov.b @Rm+,Rn */
612 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
614 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
616 case 0x6005: /* mov.w @Rm+,Rn */
617 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
619 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
621 case 0x6006: /* mov.l @Rm+,Rn */
622 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
624 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
626 case 0x0004: /* mov.b Rm,@(R0,Rn) */
628 TCGv addr = tcg_temp_new();
629 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
630 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
634 case 0x0005: /* mov.w Rm,@(R0,Rn) */
636 TCGv addr = tcg_temp_new();
637 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
638 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
642 case 0x0006: /* mov.l Rm,@(R0,Rn) */
644 TCGv addr = tcg_temp_new();
645 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
646 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
650 case 0x000c: /* mov.b @(R0,Rm),Rn */
652 TCGv addr = tcg_temp_new();
653 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
654 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
658 case 0x000d: /* mov.w @(R0,Rm),Rn */
660 TCGv addr = tcg_temp_new();
661 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
662 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
666 case 0x000e: /* mov.l @(R0,Rm),Rn */
668 TCGv addr = tcg_temp_new();
669 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
670 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
674 case 0x6008: /* swap.b Rm,Rn */
676 TCGv low = tcg_temp_new();
677 tcg_gen_ext16u_i32(low, REG(B7_4));
678 tcg_gen_bswap16_i32(low, low);
679 tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
683 case 0x6009: /* swap.w Rm,Rn */
684 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
686 case 0x200d: /* xtrct Rm,Rn */
689 high = tcg_temp_new();
690 tcg_gen_shli_i32(high, REG(B7_4), 16);
691 low = tcg_temp_new();
692 tcg_gen_shri_i32(low, REG(B11_8), 16);
693 tcg_gen_or_i32(REG(B11_8), high, low);
698 case 0x300c: /* add Rm,Rn */
699 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
701 case 0x300e: /* addc Rm,Rn */
704 t0 = tcg_const_tl(0);
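/* Rn + Rm + T in two steps: first compute Rm + T with its carry landing
   in cpu_sr_t, then add that sum into Rn so that T ends up holding the
   final carry out. */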
706 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
707 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
708 REG(B11_8), t0, t1, cpu_sr_t);
713 case 0x300f: /* addv Rm,Rn */
717 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
719 tcg_gen_xor_i32(t1, t0, REG(B11_8));
721 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
722 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
724 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
726 tcg_gen_mov_i32(REG(B11_8), t0);
730 case 0x2009: /* and Rm,Rn */
731 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
733 case 0x3000: /* cmp/eq Rm,Rn */
734 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
736 case 0x3003: /* cmp/ge Rm,Rn */
737 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
739 case 0x3007: /* cmp/gt Rm,Rn */
740 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
742 case 0x3006: /* cmp/hi Rm,Rn */
743 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
745 case 0x3002: /* cmp/hs Rm,Rn */
746 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
748 case 0x200c: /* cmp/str Rm,Rn */
750 TCGv cmp1 = tcg_temp_new();
751 TCGv cmp2 = tcg_temp_new();
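/* T is set if any byte of Rm equals the corresponding byte of Rn:
   cmp2 = Rm ^ Rn has a zero byte exactly where the operands match, and
   (x - 0x01010101) & ~x & 0x80808080 is non-zero iff x has a zero byte. */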
752 tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
753 tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
754 tcg_gen_andc_i32(cmp1, cmp1, cmp2);
755 tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
756 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
761 case 0x2007: /* div0s Rm,Rn */
762 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
763 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
764 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
766 case 0x3004: /* div1 Rm,Rn */
768 TCGv t0 = tcg_temp_new();
769 TCGv t1 = tcg_temp_new();
770 TCGv t2 = tcg_temp_new();
771 TCGv zero = tcg_const_i32(0);
773 /* shift left arg1, saving the bit being pushed out and inserting T on the right */
775 tcg_gen_shri_i32(t0, REG(B11_8), 31);
776 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
777 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
779 /* Add or subtract arg0 from arg1 depending on whether Q == M. To avoid
780 using 64-bit temps, we compute arg0's high part from q ^ m, so
781 that it is 0x00000000 when adding the value or 0xffffffff when subtracting it. */
783 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
784 tcg_gen_subi_i32(t1, t1, 1);
785 tcg_gen_neg_i32(t2, REG(B7_4));
786 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
787 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
789 /* compute T and Q depending on carry */
790 tcg_gen_andi_i32(t1, t1, 1);
791 tcg_gen_xor_i32(t1, t1, t0);
792 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
793 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
801 case 0x300d: /* dmuls.l Rm,Rn */
802 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
804 case 0x3005: /* dmulu.l Rm,Rn */
805 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
807 case 0x600e: /* exts.b Rm,Rn */
808 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
810 case 0x600f: /* exts.w Rm,Rn */
811 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
813 case 0x600c: /* extu.b Rm,Rn */
814 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
816 case 0x600d: /* extu.w Rm,Rn */
817 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
819 case 0x000f: /* mac.l @Rm+,@Rn+ */
822 arg0 = tcg_temp_new();
823 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
824 arg1 = tcg_temp_new();
825 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
826 gen_helper_macl(cpu_env, arg0, arg1);
829 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
830 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
833 case 0x400f: /* mac.w @Rm+,@Rn+ */
836 arg0 = tcg_temp_new();
837 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
838 arg1 = tcg_temp_new();
839 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
840 gen_helper_macw(cpu_env, arg0, arg1);
843 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
844 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
847 case 0x0007: /* mul.l Rm,Rn */
848 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
850 case 0x200f: /* muls.w Rm,Rn */
853 arg0 = tcg_temp_new();
854 tcg_gen_ext16s_i32(arg0, REG(B7_4));
855 arg1 = tcg_temp_new();
856 tcg_gen_ext16s_i32(arg1, REG(B11_8));
857 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
862 case 0x200e: /* mulu.w Rm,Rn */
865 arg0 = tcg_temp_new();
866 tcg_gen_ext16u_i32(arg0, REG(B7_4));
867 arg1 = tcg_temp_new();
868 tcg_gen_ext16u_i32(arg1, REG(B11_8));
869 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
874 case 0x600b: /* neg Rm,Rn */
875 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
877 case 0x600a: /* negc Rm,Rn */
879 TCGv t0 = tcg_const_i32(0);
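/* negc computes 0 - Rm - T: first form Rm + T (with its carry in
   cpu_sr_t), then subtract that from zero, leaving the borrow in T. */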
880 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
881 REG(B7_4), t0, cpu_sr_t, t0);
882 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
883 t0, t0, REG(B11_8), cpu_sr_t);
884 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
888 case 0x6007: /* not Rm,Rn */
889 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
891 case 0x200b: /* or Rm,Rn */
892 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
894 case 0x400c: /* shad Rm,Rn */
896 TCGv t0 = tcg_temp_new();
897 TCGv t1 = tcg_temp_new();
898 TCGv t2 = tcg_temp_new();
900 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
902 /* positive case: shift to the left */
903 tcg_gen_shl_i32(t1, REG(B11_8), t0);
905 /* negative case: shift to the right in two steps to
906 correctly handle the -32 case */
907 tcg_gen_xori_i32(t0, t0, 0x1f);
908 tcg_gen_sar_i32(t2, REG(B11_8), t0);
909 tcg_gen_sari_i32(t2, t2, 1);
911 /* select between the two cases */
912 tcg_gen_movi_i32(t0, 0);
913 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
920 case 0x400d: /* shld Rm,Rn */
922 TCGv t0 = tcg_temp_new();
923 TCGv t1 = tcg_temp_new();
924 TCGv t2 = tcg_temp_new();
926 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
928 /* positive case: shift to the left */
929 tcg_gen_shl_i32(t1, REG(B11_8), t0);
931 /* negative case: shift to the right in two steps to
932 correctly handle the -32 case */
933 tcg_gen_xori_i32(t0, t0, 0x1f);
934 tcg_gen_shr_i32(t2, REG(B11_8), t0);
935 tcg_gen_shri_i32(t2, t2, 1);
937 /* select between the two cases */
938 tcg_gen_movi_i32(t0, 0);
939 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
946 case 0x3008: /* sub Rm,Rn */
947 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
949 case 0x300a: /* subc Rm,Rn */
952 t0 = tcg_const_tl(0);
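/* Rn - Rm - T: first form Rm + T with its carry in cpu_sr_t, then do the
   double-word subtract so that T ends up holding the borrow bit. */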
954 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
955 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
956 REG(B11_8), t0, t1, cpu_sr_t);
957 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
962 case 0x300b: /* subv Rm,Rn */
966 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
968 tcg_gen_xor_i32(t1, t0, REG(B11_8));
970 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
971 tcg_gen_and_i32(t1, t1, t2);
973 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
975 tcg_gen_mov_i32(REG(B11_8), t0);
979 case 0x2008: /* tst Rm,Rn */
981 TCGv val = tcg_temp_new();
982 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
983 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
987 case 0x200a: /* xor Rm,Rn */
988 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
990 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
992 if (ctx->tbflags & FPSCR_SZ) {
993 int xsrc = XHACK(B7_4);
994 int xdst = XHACK(B11_8);
995 tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
996 tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
998 tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
1001 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1003 if (ctx->tbflags & FPSCR_SZ) {
1004 TCGv_i64 fp = tcg_temp_new_i64();
1005 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1006 tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEQ);
1007 tcg_temp_free_i64(fp);
1009 tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
1012 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1014 if (ctx->tbflags & FPSCR_SZ) {
1015 TCGv_i64 fp = tcg_temp_new_i64();
1016 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
1017 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1018 tcg_temp_free_i64(fp);
1020 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
1023 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1025 if (ctx->tbflags & FPSCR_SZ) {
1026 TCGv_i64 fp = tcg_temp_new_i64();
1027 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
1028 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1029 tcg_temp_free_i64(fp);
1030 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1032 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
1033 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1036 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1039 TCGv addr = tcg_temp_new_i32();
1040 if (ctx->tbflags & FPSCR_SZ) {
1041 TCGv_i64 fp = tcg_temp_new_i64();
1042 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1043 tcg_gen_subi_i32(addr, REG(B11_8), 8);
1044 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
1045 tcg_temp_free_i64(fp);
1047 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1048 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
1050 tcg_gen_mov_i32(REG(B11_8), addr);
1051 tcg_temp_free(addr);
1054 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
1057 TCGv addr = tcg_temp_new_i32();
1058 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1059 if (ctx->tbflags & FPSCR_SZ) {
1060 TCGv_i64 fp = tcg_temp_new_i64();
1061 tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEQ);
1062 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1063 tcg_temp_free_i64(fp);
1065 tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL);
1067 tcg_temp_free(addr);
1070 case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
1073 TCGv addr = tcg_temp_new();
1074 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1075 if (ctx->tbflags & FPSCR_SZ) {
1076 TCGv_i64 fp = tcg_temp_new_i64();
1077 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1078 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
1079 tcg_temp_free_i64(fp);
1081 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
1083 tcg_temp_free(addr);
1086 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1087 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1088 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1089 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1090 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1091 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1094 if (ctx->tbflags & FPSCR_PR) {
1097 if (ctx->opcode & 0x0110) {
1100 fp0 = tcg_temp_new_i64();
1101 fp1 = tcg_temp_new_i64();
1102 gen_load_fpr64(ctx, fp0, B11_8);
1103 gen_load_fpr64(ctx, fp1, B7_4);
1104 switch (ctx->opcode & 0xf00f) {
1105 case 0xf000: /* fadd Rm,Rn */
1106 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1108 case 0xf001: /* fsub Rm,Rn */
1109 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1111 case 0xf002: /* fmul Rm,Rn */
1112 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1114 case 0xf003: /* fdiv Rm,Rn */
1115 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1117 case 0xf004: /* fcmp/eq Rm,Rn */
1118 gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
1120 case 0xf005: /* fcmp/gt Rm,Rn */
1121 gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
1124 gen_store_fpr64(ctx, fp0, B11_8);
1125 tcg_temp_free_i64(fp0);
1126 tcg_temp_free_i64(fp1);
1128 switch (ctx->opcode & 0xf00f) {
1129 case 0xf000: /* fadd Rm,Rn */
1130 gen_helper_fadd_FT(FREG(B11_8), cpu_env,
1131 FREG(B11_8), FREG(B7_4));
1133 case 0xf001: /* fsub Rm,Rn */
1134 gen_helper_fsub_FT(FREG(B11_8), cpu_env,
1135 FREG(B11_8), FREG(B7_4));
1137 case 0xf002: /* fmul Rm,Rn */
1138 gen_helper_fmul_FT(FREG(B11_8), cpu_env,
1139 FREG(B11_8), FREG(B7_4));
1141 case 0xf003: /* fdiv Rm,Rn */
1142 gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
1143 FREG(B11_8), FREG(B7_4));
1145 case 0xf004: /* fcmp/eq Rm,Rn */
1146 gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
1147 FREG(B11_8), FREG(B7_4));
1149 case 0xf005: /* fcmp/gt Rm,Rn */
1150 gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
1151 FREG(B11_8), FREG(B7_4));
1157 case 0xf00e: /* fmac FR0,FRm,FRn */
1160 gen_helper_fmac_FT(FREG(B11_8), cpu_env,
1161 FREG(0), FREG(B7_4), FREG(B11_8));
1165 switch (ctx->opcode & 0xff00) {
1166 case 0xc900: /* and #imm,R0 */
1167 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1169 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1172 addr = tcg_temp_new();
1173 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1174 val = tcg_temp_new();
1175 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1176 tcg_gen_andi_i32(val, val, B7_0);
1177 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1179 tcg_temp_free(addr);
1182 case 0x8b00: /* bf label */
1183 CHECK_NOT_DELAY_SLOT
1184 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
1186 case 0x8f00: /* bf/s label */
1187 CHECK_NOT_DELAY_SLOT
1188 tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
1189 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
1190 ctx->envflags |= DELAY_SLOT_CONDITIONAL;
1192 case 0x8900: /* bt label */
1193 CHECK_NOT_DELAY_SLOT
1194 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
1196 case 0x8d00: /* bt/s label */
1197 CHECK_NOT_DELAY_SLOT
1198 tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
1199 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
1200 ctx->envflags |= DELAY_SLOT_CONDITIONAL;
1202 case 0x8800: /* cmp/eq #imm,R0 */
1203 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
1205 case 0xc400: /* mov.b @(disp,GBR),R0 */
1207 TCGv addr = tcg_temp_new();
1208 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1209 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1210 tcg_temp_free(addr);
1213 case 0xc500: /* mov.w @(disp,GBR),R0 */
1215 TCGv addr = tcg_temp_new();
1216 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1217 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1218 tcg_temp_free(addr);
1221 case 0xc600: /* mov.l @(disp,GBR),R0 */
1223 TCGv addr = tcg_temp_new();
1224 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1225 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
1226 tcg_temp_free(addr);
1229 case 0xc000: /* mov.b R0,@(disp,GBR) */
1231 TCGv addr = tcg_temp_new();
1232 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1233 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1234 tcg_temp_free(addr);
1237 case 0xc100: /* mov.w R0,@(disp,GBR) */
1239 TCGv addr = tcg_temp_new();
1240 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1241 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1242 tcg_temp_free(addr);
1245 case 0xc200: /* mov.l R0,@(disp,GBR) */
1247 TCGv addr = tcg_temp_new();
1248 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1249 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
1250 tcg_temp_free(addr);
1253 case 0x8000: /* mov.b R0,@(disp,Rn) */
1255 TCGv addr = tcg_temp_new();
1256 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1257 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1258 tcg_temp_free(addr);
1261 case 0x8100: /* mov.w R0,@(disp,Rn) */
1263 TCGv addr = tcg_temp_new();
1264 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1265 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1266 tcg_temp_free(addr);
1269 case 0x8400: /* mov.b @(disp,Rn),R0 */
1271 TCGv addr = tcg_temp_new();
1272 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1273 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1274 tcg_temp_free(addr);
1277 case 0x8500: /* mov.w @(disp,Rn),R0 */
1279 TCGv addr = tcg_temp_new();
1280 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1281 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1282 tcg_temp_free(addr);
1285 case 0xc700: /* mova @(disp,PC),R0 */
1286 tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
1287 4 + B7_0 * 4) & ~3);
1289 case 0xcb00: /* or #imm,R0 */
1290 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1292 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1295 addr = tcg_temp_new();
1296 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1297 val = tcg_temp_new();
1298 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1299 tcg_gen_ori_i32(val, val, B7_0);
1300 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1302 tcg_temp_free(addr);
1305 case 0xc300: /* trapa #imm */
1308 CHECK_NOT_DELAY_SLOT
1309 gen_save_cpu_state(ctx, true);
1310 imm = tcg_const_i32(B7_0);
1311 gen_helper_trapa(cpu_env, imm);
1313 ctx->base.is_jmp = DISAS_NORETURN;
1316 case 0xc800: /* tst #imm,R0 */
1318 TCGv val = tcg_temp_new();
1319 tcg_gen_andi_i32(val, REG(0), B7_0);
1320 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1324 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1326 TCGv val = tcg_temp_new();
1327 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1328 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1329 tcg_gen_andi_i32(val, val, B7_0);
1330 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1334 case 0xca00: /* xor #imm,R0 */
1335 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1337 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1340 addr = tcg_temp_new();
1341 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1342 val = tcg_temp_new();
1343 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1344 tcg_gen_xori_i32(val, val, B7_0);
1345 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1347 tcg_temp_free(addr);
1352 switch (ctx->opcode & 0xf08f) {
1353 case 0x408e: /* ldc Rm,Rn_BANK */
1355 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1357 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1359 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
1360 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1362 case 0x0082: /* stc Rm_BANK,Rn */
1364 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1366 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1369 TCGv addr = tcg_temp_new();
1370 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1371 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
1372 tcg_gen_mov_i32(REG(B11_8), addr);
1373 tcg_temp_free(addr);
1378 switch (ctx->opcode & 0xf0ff) {
1379 case 0x0023: /* braf Rn */
1380 CHECK_NOT_DELAY_SLOT
1381 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
1382 ctx->envflags |= DELAY_SLOT;
1383 ctx->delayed_pc = (uint32_t) - 1;
1385 case 0x0003: /* bsrf Rn */
1386 CHECK_NOT_DELAY_SLOT
1387 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
1388 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1389 ctx->envflags |= DELAY_SLOT;
1390 ctx->delayed_pc = (uint32_t) - 1;
1392 case 0x4015: /* cmp/pl Rn */
1393 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
1395 case 0x4011: /* cmp/pz Rn */
1396 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
1398 case 0x4010: /* dt Rn */
1399 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1400 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
1402 case 0x402b: /* jmp @Rn */
1403 CHECK_NOT_DELAY_SLOT
1404 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1405 ctx->envflags |= DELAY_SLOT;
1406 ctx->delayed_pc = (uint32_t) - 1;
1408 case 0x400b: /* jsr @Rn */
1409 CHECK_NOT_DELAY_SLOT
1410 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
1411 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1412 ctx->envflags |= DELAY_SLOT;
1413 ctx->delayed_pc = (uint32_t) - 1;
1415 case 0x400e: /* ldc Rm,SR */
1418 TCGv val = tcg_temp_new();
1419 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1422 ctx->base.is_jmp = DISAS_STOP;
1425 case 0x4007: /* ldc.l @Rm+,SR */
1428 TCGv val = tcg_temp_new();
1429 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
1430 tcg_gen_andi_i32(val, val, 0x700083f3);
1433 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1434 ctx->base.is_jmp = DISAS_STOP;
1437 case 0x0002: /* stc SR,Rn */
1439 gen_read_sr(REG(B11_8));
1441 case 0x4003: /* stc SR,@-Rn */
1444 TCGv addr = tcg_temp_new();
1445 TCGv val = tcg_temp_new();
1446 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1448 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1449 tcg_gen_mov_i32(REG(B11_8), addr);
1451 tcg_temp_free(addr);
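/* The LD/ST/LDST macros below expand to the ldc/lds (register and
   post-increment memory) and stc/sts (register and pre-decrement memory)
   handlers for one control or system register, guarded by the given
   privilege or FPU check. */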
1454 #define LD(reg,ldnum,ldpnum,prechk) \
1457 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1461 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1462 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1464 #define ST(reg,stnum,stpnum,prechk) \
1467 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1472 TCGv addr = tcg_temp_new(); \
1473 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1474 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1475 tcg_gen_mov_i32(REG(B11_8), addr); \
1476 tcg_temp_free(addr); \
1479 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1480 LD(reg,ldnum,ldpnum,prechk) \
1481 ST(reg,stnum,stpnum,prechk)
1482 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1483 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1484 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1485 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1486 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1487 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
1488 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1489 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1490 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1491 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1492 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1493 case 0x406a: /* lds Rm,FPSCR */
1495 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1496 ctx->base.is_jmp = DISAS_STOP;
1498 case 0x4066: /* lds.l @Rm+,FPSCR */
1501 TCGv addr = tcg_temp_new();
1502 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
1503 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1504 gen_helper_ld_fpscr(cpu_env, addr);
1505 tcg_temp_free(addr);
1506 ctx->base.is_jmp = DISAS_STOP;
1509 case 0x006a: /* sts FPSCR,Rn */
1511 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1513 case 0x4062: /* sts FPSCR,@-Rn */
1517 val = tcg_temp_new();
1518 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1519 addr = tcg_temp_new();
1520 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1521 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1522 tcg_gen_mov_i32(REG(B11_8), addr);
1523 tcg_temp_free(addr);
1527 case 0x00c3: /* movca.l R0,@Rm */
1529 TCGv val = tcg_temp_new();
1530 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
1531 gen_helper_movcal(cpu_env, REG(B11_8), val);
1532 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1535 ctx->has_movcal = 1;
1537 case 0x40a9: /* movua.l @Rm,R0 */
1539 /* Load non-boundary-aligned data */
1540 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1541 MO_TEUL | MO_UNALN);
1544 case 0x40e9: /* movua.l @Rm+,R0 */
1546 /* Load non-boundary-aligned data */
1547 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1548 MO_TEUL | MO_UNALN);
1549 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1552 case 0x0029: /* movt Rn */
1553 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1558 * If (T == 1) R0 -> (Rn)
1561 * The above description doesn't work in a parallel context.
1562 * Since we currently support no smp boards, this implies user-mode.
1563 * But we can still support the official mechanism while user-mode
1564 * is single-threaded. */
1567 TCGLabel *fail = gen_new_label();
1568 TCGLabel *done = gen_new_label();
1570 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1573 tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
1574 cpu_lock_addr, fail);
1575 tmp = tcg_temp_new();
1576 tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
1577 REG(0), ctx->memidx, MO_TEUL);
1578 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
1581 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
1582 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1583 tcg_gen_movi_i32(cpu_sr_t, 1);
1587 gen_set_label(fail);
1588 tcg_gen_movi_i32(cpu_sr_t, 0);
1590 gen_set_label(done);
1591 tcg_gen_movi_i32(cpu_lock_addr, -1);
1598 * When interrupt/exception
1599 * occurred 0 -> LDST
1601 * In a parallel context, we must also save the loaded value
1602 * for use with the cmpxchg that we'll use with movco.l. */
1604 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1605 TCGv tmp = tcg_temp_new();
1606 tcg_gen_mov_i32(tmp, REG(B11_8));
1607 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1608 tcg_gen_mov_i32(cpu_lock_value, REG(0));
1609 tcg_gen_mov_i32(cpu_lock_addr, tmp);
1612 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1613 tcg_gen_movi_i32(cpu_lock_addr, 0);
1616 case 0x0093: /* ocbi @Rn */
1618 gen_helper_ocbi(cpu_env, REG(B11_8));
1621 case 0x00a3: /* ocbp @Rn */
1622 case 0x00b3: /* ocbwb @Rn */
1623 /* These instructions are supposed to do nothing in case of
1624 a cache miss. Given that we only partially emulate caches,
1625 it is safe to simply ignore them. */
1627 case 0x0083: /* pref @Rn */
1629 case 0x00d3: /* prefi @Rn */
1632 case 0x00e3: /* icbi @Rn */
1635 case 0x00ab: /* synco */
1637 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1640 case 0x4024: /* rotcl Rn */
1642 TCGv tmp = tcg_temp_new();
1643 tcg_gen_mov_i32(tmp, cpu_sr_t);
1644 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1645 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1646 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1650 case 0x4025: /* rotcr Rn */
1652 TCGv tmp = tcg_temp_new();
1653 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1654 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1655 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1656 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1660 case 0x4004: /* rotl Rn */
1661 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1662 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1664 case 0x4005: /* rotr Rn */
1665 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1666 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1668 case 0x4000: /* shll Rn */
1669 case 0x4020: /* shal Rn */
1670 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1671 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1673 case 0x4021: /* shar Rn */
1674 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1675 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1677 case 0x4001: /* shlr Rn */
1678 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1679 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1681 case 0x4008: /* shll2 Rn */
1682 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1684 case 0x4018: /* shll8 Rn */
1685 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1687 case 0x4028: /* shll16 Rn */
1688 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1690 case 0x4009: /* shlr2 Rn */
1691 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1693 case 0x4019: /* shlr8 Rn */
1694 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1696 case 0x4029: /* shlr16 Rn */
1697 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1699 case 0x401b: /* tas.b @Rn */
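        /* Atomically OR 0x80 into the byte at @Rn; T is set if the byte
           read back was zero. */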
1701 TCGv val = tcg_const_i32(0x80);
1702 tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
1703 ctx->memidx, MO_UB);
1704 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1708 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1710 tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
1712 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1714 tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
1716 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1718 if (ctx->tbflags & FPSCR_PR) {
1720 if (ctx->opcode & 0x0100) {
1723 fp = tcg_temp_new_i64();
1724 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1725 gen_store_fpr64(ctx, fp, B11_8);
1726 tcg_temp_free_i64(fp);
1729 gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
1732 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1734 if (ctx->tbflags & FPSCR_PR) {
1736 if (ctx->opcode & 0x0100) {
1739 fp = tcg_temp_new_i64();
1740 gen_load_fpr64(ctx, fp, B11_8);
1741 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1742 tcg_temp_free_i64(fp);
1745 gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
1748 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1750 tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
1752 case 0xf05d: /* fabs FRn/DRn - FPCSR: Nothing */
1754 tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
1756 case 0xf06d: /* fsqrt FRn */
1758 if (ctx->tbflags & FPSCR_PR) {
1759 if (ctx->opcode & 0x0100) {
1762 TCGv_i64 fp = tcg_temp_new_i64();
1763 gen_load_fpr64(ctx, fp, B11_8);
1764 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1765 gen_store_fpr64(ctx, fp, B11_8);
1766 tcg_temp_free_i64(fp);
1768 gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
1771 case 0xf07d: /* fsrra FRn */
1774 gen_helper_fsrra_FT(FREG(B11_8), cpu_env, FREG(B11_8));
1776 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1779 tcg_gen_movi_i32(FREG(B11_8), 0);
1781 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1784 tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1786 case 0xf0ad: /* fcnvsd FPUL,DRn */
1789 TCGv_i64 fp = tcg_temp_new_i64();
1790 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1791 gen_store_fpr64(ctx, fp, B11_8);
1792 tcg_temp_free_i64(fp);
1795 case 0xf0bd: /* fcnvds DRn,FPUL */
1798 TCGv_i64 fp = tcg_temp_new_i64();
1799 gen_load_fpr64(ctx, fp, B11_8);
1800 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1801 tcg_temp_free_i64(fp);
1804 case 0xf0ed: /* fipr FVm,FVn */
1808 TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3);
1809 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
1810 gen_helper_fipr(cpu_env, m, n);
1816 case 0xf0fd: /* ftrv XMTRX,FVn */
1820 if ((ctx->opcode & 0x0300) != 0x0100) {
1823 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
1824 gen_helper_ftrv(cpu_env, n);
1831 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1832 ctx->opcode, ctx->base.pc_next);
1836 if (ctx->envflags & DELAY_SLOT_MASK) {
1838 gen_save_cpu_state(ctx, true);
1839 gen_helper_raise_slot_illegal_instruction(cpu_env);
1841 gen_save_cpu_state(ctx, true);
1842 gen_helper_raise_illegal_instruction(cpu_env);
1844 ctx->base.is_jmp = DISAS_NORETURN;
1848 gen_save_cpu_state(ctx, true);
1849 if (ctx->envflags & DELAY_SLOT_MASK) {
1850 gen_helper_raise_slot_fpu_disable(cpu_env);
1852 gen_helper_raise_fpu_disable(cpu_env);
1854 ctx->base.is_jmp = DISAS_NORETURN;
1858 static void decode_opc(DisasContext * ctx)
1860 uint32_t old_flags = ctx->envflags;
1864 if (old_flags & DELAY_SLOT_MASK) {
1865 /* we are leaving the delay slot */
1866 ctx->envflags &= ~DELAY_SLOT_MASK;
1868 /* When in an exclusive region, we must continue to the end
1869 for conditional branches. */
1870 if (ctx->tbflags & GUSA_EXCLUSIVE
1871 && old_flags & DELAY_SLOT_CONDITIONAL) {
1872 gen_delayed_conditional_jump(ctx);
1875 /* Otherwise this is probably an invalid gUSA region.
1876 Drop the GUSA bits so the next TB doesn't see them. */
1877 ctx->envflags &= ~GUSA_MASK;
1879 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
1880 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1881 gen_delayed_conditional_jump(ctx);
1888 #ifdef CONFIG_USER_ONLY
1889 /* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
1890 Upon an interrupt, a real kernel would simply notice magic values in
1891 the registers and reset the PC to the start of the sequence.
1893 For QEMU, we cannot do this in quite the same way. Instead, we notice
1894 the normal start of such a sequence (mov #-x,r15). While we can handle
1895 any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1896 sequences and transform them into atomic operations as seen by the host.
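
   As an illustration (not part of the original comment), the atomic-add
   gUSA sequence emitted by the Linux kernel looks roughly like this, with
   the register choices here being arbitrary:

       mova    1f,r0     ! r0 = address of the end of the region
       mov     r15,r1    ! r1 = saved stack pointer
       mov     #-6,r15   ! negative r15 marks the 6-byte gUSA region
       mov.l   @r2,r3    ! load the old value
       add     r4,r3     ! operate on it
       mov.l   r3,@r2    ! store the new value
    1: mov     r1,r15    ! restore r15, leaving the region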
1898 static int decode_gusa(DisasContext *ctx, CPUSH4State *env, int *pmax_insns)
1901 int ld_adr, ld_dst, ld_mop;
1902 int op_dst, op_src, op_opc;
1903 int mv_src, mt_dst, st_src, st_mop;
1906 uint32_t pc = ctx->base.pc_next;
1907 uint32_t pc_end = ctx->base.tb->cs_base;
1908 int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8);
1909 int max_insns = (pc_end - pc) / 2;
1912 if (pc != pc_end + backup || max_insns < 2) {
1913 /* This is a malformed gUSA region. Don't do anything special,
1914 since the interpreter is likely to get confused. */
1915 ctx->envflags &= ~GUSA_MASK;
1919 if (ctx->tbflags & GUSA_EXCLUSIVE) {
1920 /* Regardless of single-stepping or the end of the page,
1921 we must complete execution of the gUSA region while
1922 holding the exclusive lock. */
1923 *pmax_insns = max_insns;
1927 /* The state machine below will consume only a few insns.
1928 If there are more than that in a region, fail now. */
1929 if (max_insns > ARRAY_SIZE(insns)) {
1933 /* Read all of the insns for the region. */
1934 for (i = 0; i < max_insns; ++i) {
1935 insns[i] = cpu_lduw_code(env, pc + i * 2);
1938 ld_adr = ld_dst = ld_mop = -1;
1940 op_dst = op_src = op_opc = -1;
1942 st_src = st_mop = -1;
1943 TCGV_UNUSED(op_arg);
1947 do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
1950 * Expect a load to begin the region.
1953 switch (ctx->opcode & 0xf00f) {
1954 case 0x6000: /* mov.b @Rm,Rn */
1957 case 0x6001: /* mov.w @Rm,Rn */
1960 case 0x6002: /* mov.l @Rm,Rn */
1968 if (ld_adr == ld_dst) {
1971 /* Unless we see a mov, any two-operand operation must use ld_dst. */
1975 * Expect an optional register move.
1978 switch (ctx->opcode & 0xf00f) {
1979 case 0x6003: /* mov Rm,Rn */
1980 /* Here we want to recognize ld_dst being saved for later consumption,
1981 or for another input register being copied so that ld_dst need not
1982 be clobbered during the operation. */
1985 if (op_dst == ld_dst) {
1986 /* Overwriting the load output. */
1989 if (mv_src != ld_dst) {
1990 /* Copying a new input; constrain op_src to match the load. */
1996 /* Put back and re-examine as operation. */
2001 * Expect the operation.
2004 switch (ctx->opcode & 0xf00f) {
2005 case 0x300c: /* add Rm,Rn */
2006 op_opc = INDEX_op_add_i32;
2008 case 0x2009: /* and Rm,Rn */
2009 op_opc = INDEX_op_and_i32;
2011 case 0x200a: /* xor Rm,Rn */
2012 op_opc = INDEX_op_xor_i32;
2014 case 0x200b: /* or Rm,Rn */
2015 op_opc = INDEX_op_or_i32;
2017 /* The operation register should be as expected, and the
2018 other input cannot depend on the load. */
2019 if (op_dst != B11_8) {
2023 /* Unconstrained input. */
2025 } else if (op_src == B7_4) {
2026 /* Constrained input matched load. All operations are
2027 commutative; "swap" them by "moving" the load output
2028 to the (implicit) first argument and the move source
2029 to the (explicit) second argument. */
2034 op_arg = REG(op_src);
2037 case 0x6007: /* not Rm,Rn */
2038 if (ld_dst != B7_4 || mv_src >= 0) {
2042 op_opc = INDEX_op_xor_i32;
2043 op_arg = tcg_const_i32(-1);
2046 case 0x7000 ... 0x700f: /* add #imm,Rn */
2047 if (op_dst != B11_8 || mv_src >= 0) {
2050 op_opc = INDEX_op_add_i32;
2051 op_arg = tcg_const_i32(B7_0s);
2054 case 0x3000: /* cmp/eq Rm,Rn */
2055 /* Looking for the middle of a compare-and-swap sequence,
2056 beginning with the compare. Operands can be in either order,
2057 but with only one overlapping the load. */
2058 if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
2061 op_opc = INDEX_op_setcond_i32; /* placeholder */
2062 op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
2063 op_arg = REG(op_src);
2066 switch (ctx->opcode & 0xff00) {
2067 case 0x8b00: /* bf label */
2068 case 0x8f00: /* bf/s label */
2069 if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
2072 if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
2075 /* We're looking to unconditionally modify Rn with the
2076 result of the comparison, within the delay slot of
2077 the branch. This is used by older gcc. */
2079 if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
2091 case 0x2008: /* tst Rm,Rn */
2092 /* Looking for a compare-and-swap against zero. */
2093 if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
2096 op_opc = INDEX_op_setcond_i32;
2097 op_arg = tcg_const_i32(0);
2100 if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
2101 || pc + (i + 1 + B7_0s) * 2 != pc_end) {
2107 /* Put back and re-examine as store. */
2114 /* The store must be the last insn. */
2115 if (i != max_insns - 1) {
2119 switch (ctx->opcode & 0xf00f) {
2120 case 0x2000: /* mov.b Rm,@Rn */
2123 case 0x2001: /* mov.w Rm,@Rn */
2126 case 0x2002: /* mov.l Rm,@Rn */
2132 /* The store must match the load. */
2133 if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
2141 * Emit the operation.
2143 tcg_gen_insn_start(pc, ctx->envflags);
2146 /* No operation found. Look for exchange pattern. */
2147 if (st_src == ld_dst || mv_src >= 0) {
2150 tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
2151 ctx->memidx, ld_mop);
2154 case INDEX_op_add_i32:
2155 if (op_dst != st_src) {
2158 if (op_dst == ld_dst && st_mop == MO_UL) {
2159 tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
2160 op_arg, ctx->memidx, ld_mop);
2162 tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
2163 op_arg, ctx->memidx, ld_mop);
2164 if (op_dst != ld_dst) {
2165 /* Note that mop sizes < 4 cannot use add_fetch
2166 because it won't carry into the higher bits. */
2167 tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
2172 case INDEX_op_and_i32:
2173 if (op_dst != st_src) {
2176 if (op_dst == ld_dst) {
2177 tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
2178 op_arg, ctx->memidx, ld_mop);
2180 tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
2181 op_arg, ctx->memidx, ld_mop);
2182 tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
2186 case INDEX_op_or_i32:
2187 if (op_dst != st_src) {
2190 if (op_dst == ld_dst) {
2191 tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
2192 op_arg, ctx->memidx, ld_mop);
2194 tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
2195 op_arg, ctx->memidx, ld_mop);
2196 tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
2200 case INDEX_op_xor_i32:
2201 if (op_dst != st_src) {
2204 if (op_dst == ld_dst) {
2205 tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
2206 op_arg, ctx->memidx, ld_mop);
2208 tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
2209 op_arg, ctx->memidx, ld_mop);
2210 tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
2214 case INDEX_op_setcond_i32:
2215 if (st_src == ld_dst) {
2218 tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
2219 REG(st_src), ctx->memidx, ld_mop);
2220 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
2222 tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
2227 g_assert_not_reached();
2230 /* If op_src is not a valid register, then op_arg was a constant. */
2231 if (op_src < 0 && !TCGV_IS_UNUSED(op_arg)) {
2232 tcg_temp_free_i32(op_arg);
2235 /* The entire region has been translated. */
2236 ctx->envflags &= ~GUSA_MASK;
2237 ctx->base.pc_next = pc_end;
2241 qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
2244 /* Restart with the EXCLUSIVE bit set, within a TB run via
2245 cpu_exec_step_atomic holding the exclusive lock. */
2246 tcg_gen_insn_start(pc, ctx->envflags);
2247 ctx->envflags |= GUSA_EXCLUSIVE;
2248 gen_save_cpu_state(ctx, false);
2249 gen_helper_exclusive(cpu_env);
2250 ctx->base.is_jmp = DISAS_NORETURN;
2252 /* We're not executing an instruction, but we must report one for the
2253 purposes of accounting within the TB. We might as well report the
2254 entire region consumed via ctx->base.pc_next so that it's immediately
2255 available in the disassembly dump. */
2256 ctx->base.pc_next = pc_end;
2261 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
2263 CPUSH4State *env = cs->env_ptr;
2265 target_ulong pc_start;
2270 ctx.base.pc_next = pc_start;
2271 ctx.tbflags = (uint32_t)tb->flags;
2272 ctx.envflags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
2273 ctx.base.is_jmp = DISAS_NEXT;
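    /* MMU index 1 is used for user-mode (SR.MD clear) accesses and index 0
       for privileged accesses. */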
2274 ctx.memidx = (ctx.tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
2275 /* We don't know if the delayed pc came from a dynamic or static branch,
2276 so assume it is a dynamic branch. */
2277 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
2279 ctx.base.singlestep_enabled = cs->singlestep_enabled;
2280 ctx.features = env->features;
2281 ctx.has_movcal = (ctx.tbflags & TB_FLAG_PENDING_MOVCA);
2282 ctx.gbank = ((ctx.tbflags & (1 << SR_MD)) &&
2283 (ctx.tbflags & (1 << SR_RB))) * 0x10;
2284 ctx.fbank = ctx.tbflags & FPSCR_FR ? 0x10 : 0;
2286 max_insns = tb_cflags(tb) & CF_COUNT_MASK;
2287 if (max_insns == 0) {
2288 max_insns = CF_COUNT_MASK;
2290 max_insns = MIN(max_insns, TCG_MAX_INSNS);
2292 /* Since the ISA is fixed-width, we can bound by the number
2293 of instructions remaining on the page. */
2294 num_insns = -(ctx.base.pc_next | TARGET_PAGE_MASK) / 2;
2295 max_insns = MIN(max_insns, num_insns);
2297 /* Single stepping means just that. */
2298 if (ctx.base.singlestep_enabled || singlestep) {
2305 #ifdef CONFIG_USER_ONLY
2306 if (ctx.tbflags & GUSA_MASK) {
2307 num_insns = decode_gusa(&ctx, env, &max_insns);
2311 while (ctx.base.is_jmp == DISAS_NEXT
2312 && num_insns < max_insns
2313 && !tcg_op_buf_full()) {
2314 tcg_gen_insn_start(ctx.base.pc_next, ctx.envflags);
2317 if (unlikely(cpu_breakpoint_test(cs, ctx.base.pc_next, BP_ANY))) {
2318 /* We have hit a breakpoint - make sure PC is up-to-date */
2319 gen_save_cpu_state(&ctx, true);
2320 gen_helper_debug(cpu_env);
2321 ctx.base.is_jmp = DISAS_NORETURN;
2322 /* The address covered by the breakpoint must be included in
2323 [tb->pc, tb->pc + tb->size) in order for it to be
2324 properly cleared -- thus we increment the PC here so that
2325 the logic setting tb->size below does the right thing. */
2326 ctx.base.pc_next += 2;
2330 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
2334 ctx.opcode = cpu_lduw_code(env, ctx.base.pc_next);
2336 ctx.base.pc_next += 2;
2338 if (tb_cflags(tb) & CF_LAST_IO) {
2342 if (ctx.tbflags & GUSA_EXCLUSIVE) {
2343 /* Ending the region of exclusivity. Clear the bits. */
2344 ctx.envflags &= ~GUSA_MASK;
2347 switch (ctx.base.is_jmp) {
2349 gen_save_cpu_state(&ctx, true);
2350 if (ctx.base.singlestep_enabled) {
2351 gen_helper_debug(cpu_env);
2357 gen_save_cpu_state(&ctx, false);
2358 gen_goto_tb(&ctx, 0, ctx.base.pc_next);
2360 case DISAS_NORETURN:
2363 g_assert_not_reached();
2366 gen_tb_end(tb, num_insns);
2368 tb->size = ctx.base.pc_next - pc_start;
2369 tb->icount = num_insns;
2372 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
2373 && qemu_log_in_addr_range(pc_start)) {
2375 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2376 log_target_disas(cs, pc_start, ctx.base.pc_next - pc_start);
2383 void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
2387 env->flags = data[1];
2388 /* Theoretically, delayed_pc should also be restored. In practice the
2389 branch instruction is re-executed after an exception, so the delayed
2390 branch target will be recomputed. */