#include "cpu.h"
#include "disas/disas.h"
-#include "helper.h"
+#include "exec/helper-proto.h"
#include "tcg-op.h"
-#include "hw/lm32_pic.h"
+#include "exec/cpu_ldst.h"
+#include "hw/lm32/lm32_pic.h"
+
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
-#define GEN_HELPER 1
-#include "helper.h"
#define DISAS_LM32 1
#if DISAS_LM32
/* This is the state at translation time. */
typedef struct DisasContext {
- CPULM32State *env;
target_ulong pc;
/* Decoder. */
unsigned int tb_flags, synced_flags; /* tb dependent flags. */
int is_jmp;
- int nr_nops;
struct TranslationBlock *tb;
int singlestep_enabled;
+
+ uint32_t features;
+ uint8_t num_breakpoints;
+ uint8_t num_watchpoints;
} DisasContext;
static const char *regnames[] = {
tcg_temp_free_i32(tmp);
}
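+/* Set cpu_pc to the faulting insn and call the illegal-instruction helper. */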
+static inline void t_gen_illegal_insn(DisasContext *dc)
+{
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ gen_helper_ill(cpu_env);
+}
+
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
TranslationBlock *tb;
likely(!dc->singlestep_enabled)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(cpu_pc, dest);
- tcg_gen_exit_tb((tcg_target_long)tb + n);
+ tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
tcg_gen_movi_tl(cpu_pc, dest);
if (dc->singlestep_enabled) {
/* restore IE.IE in case of an eret */
if (dc->r0 == R_EA) {
TCGv t0 = tcg_temp_new();
- int l1 = gen_new_label();
+ TCGLabel *l1 = gen_new_label();
tcg_gen_andi_tl(t0, cpu_ie, IE_EIE);
tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_EIE, l1);
tcg_temp_free(t0);
} else if (dc->r0 == R_BA) {
TCGv t0 = tcg_temp_new();
- int l1 = gen_new_label();
+ TCGLabel *l1 = gen_new_label();
tcg_gen_andi_tl(t0, cpu_ie, IE_BIE);
tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_BIE, l1);
static inline void gen_cond_branch(DisasContext *dc, int cond)
{
- int l1;
-
- l1 = gen_new_label();
+ TCGLabel *l1 = gen_new_label();
tcg_gen_brcond_tl(cond, cpu_R[dc->r0], cpu_R[dc->r1], l1);
gen_goto_tb(dc, 0, dc->pc + 4);
gen_set_label(l1);
int rX = (dc->format == OP_FMT_RR) ? dc->r2 : dc->r1;
int rY = (dc->format == OP_FMT_RR) ? dc->r0 : dc->r0;
int rZ = (dc->format == OP_FMT_RR) ? dc->r1 : -1;
+ int i;
if (dc->format == OP_FMT_RI) {
- tcg_gen_setcondi_tl(cond, cpu_R[rX], cpu_R[rY],
- sign_extend(dc->imm16, 16));
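+ /* Unsigned compares take a zero-extended immediate; all others sign-extend it. */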
+ switch (cond) {
+ case TCG_COND_GEU:
+ case TCG_COND_GTU:
+ i = zero_extend(dc->imm16, 16);
+ break;
+ default:
+ i = sign_extend(dc->imm16, 16);
+ break;
+ }
+
+ tcg_gen_setcondi_tl(cond, cpu_R[rX], cpu_R[rY], i);
} else {
tcg_gen_setcond_tl(cond, cpu_R[rX], cpu_R[rY], cpu_R[rZ]);
}
{
if (dc->format == OP_FMT_RI) {
LOG_DIS("cmpgeui r%d, r%d, %d\n", dc->r0, dc->r1,
- sign_extend(dc->imm16, 16));
+ zero_extend(dc->imm16, 16));
} else {
LOG_DIS("cmpgeu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
}
{
if (dc->format == OP_FMT_RI) {
LOG_DIS("cmpgui r%d, r%d, %d\n", dc->r0, dc->r1,
- sign_extend(dc->imm16, 16));
+ zero_extend(dc->imm16, 16));
} else {
LOG_DIS("cmpgu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
}
static void dec_divu(DisasContext *dc)
{
- int l1;
+ TCGLabel *l1;
LOG_DIS("divu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
- if (!(dc->env->features & LM32_FEATURE_DIVIDE)) {
- cpu_abort(dc->env, "hardware divider is not available\n");
+ if (!(dc->features & LM32_FEATURE_DIVIDE)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "hardware divider is not available\n");
+ t_gen_illegal_insn(dc);
+ return;
}
l1 = gen_new_label();
static void dec_modu(DisasContext *dc)
{
- int l1;
+ TCGLabel *l1;
LOG_DIS("modu r%d, r%d, %d\n", dc->r2, dc->r0, dc->r1);
- if (!(dc->env->features & LM32_FEATURE_DIVIDE)) {
- cpu_abort(dc->env, "hardware divider is not available\n");
+ if (!(dc->features & LM32_FEATURE_DIVIDE)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "hardware divider is not available\n");
+ t_gen_illegal_insn(dc);
+ return;
}
l1 = gen_new_label();
LOG_DIS("mul r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
}
- if (!(dc->env->features & LM32_FEATURE_MULTIPLY)) {
- cpu_abort(dc->env, "hardware multiplier is not available\n");
+ if (!(dc->features & LM32_FEATURE_MULTIPLY)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "hardware multiplier is not available\n");
+ t_gen_illegal_insn(dc);
+ return;
}
if (dc->format == OP_FMT_RI) {
static void dec_scall(DisasContext *dc)
{
- if (dc->imm5 == 7) {
- LOG_DIS("scall\n");
- } else if (dc->imm5 == 2) {
+ switch (dc->imm5) {
+ case 2:
LOG_DIS("break\n");
- } else {
- cpu_abort(dc->env, "invalid opcode\n");
- }
-
- if (dc->imm5 == 7) {
- tcg_gen_movi_tl(cpu_pc, dc->pc);
- t_gen_raise_exception(dc, EXCP_SYSTEMCALL);
- } else {
tcg_gen_movi_tl(cpu_pc, dc->pc);
t_gen_raise_exception(dc, EXCP_BREAKPOINT);
+ break;
+ case 7:
+ LOG_DIS("scall\n");
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ t_gen_raise_exception(dc, EXCP_SYSTEMCALL);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "invalid opcode @0x%x\n", dc->pc);
+ t_gen_illegal_insn(dc);
+ break;
}
}
case CSR_WP1:
case CSR_WP2:
case CSR_WP3:
- cpu_abort(dc->env, "invalid read access csr=%x\n", dc->csr);
+ qemu_log_mask(LOG_GUEST_ERROR, "invalid read access csr=%x\n", dc->csr);
break;
default:
- cpu_abort(dc->env, "read_csr: unknown csr=%x\n", dc->csr);
+ qemu_log_mask(LOG_GUEST_ERROR, "read_csr: unknown csr=%x\n", dc->csr);
break;
}
}
{
LOG_DIS("sextb r%d, r%d\n", dc->r2, dc->r0);
- if (!(dc->env->features & LM32_FEATURE_SIGN_EXTEND)) {
- cpu_abort(dc->env, "hardware sign extender is not available\n");
+ if (!(dc->features & LM32_FEATURE_SIGN_EXTEND)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "hardware sign extender is not available\n");
+ t_gen_illegal_insn(dc);
+ return;
}
tcg_gen_ext8s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
{
LOG_DIS("sexth r%d, r%d\n", dc->r2, dc->r0);
- if (!(dc->env->features & LM32_FEATURE_SIGN_EXTEND)) {
- cpu_abort(dc->env, "hardware sign extender is not available\n");
+ if (!(dc->features & LM32_FEATURE_SIGN_EXTEND)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "hardware sign extender is not available\n");
+ t_gen_illegal_insn(dc);
+ return;
}
tcg_gen_ext16s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
LOG_DIS("sl r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
}
- if (!(dc->env->features & LM32_FEATURE_SHIFT)) {
- cpu_abort(dc->env, "hardware shifter is not available\n");
+ if (!(dc->features & LM32_FEATURE_SHIFT)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "hardware shifter is not available\n");
+ t_gen_illegal_insn(dc);
+ return;
}
if (dc->format == OP_FMT_RI) {
LOG_DIS("sr r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
}
- if (!(dc->env->features & LM32_FEATURE_SHIFT)) {
- if (dc->format == OP_FMT_RI) {
- /* TODO: check r1 == 1 during runtime */
- } else {
- if (dc->imm5 != 1) {
- cpu_abort(dc->env, "hardware shifter is not available\n");
- }
- }
- }
-
+ /* The real CPU (w/o hardware shifter) only supports right shift by exactly
+ * one bit */
if (dc->format == OP_FMT_RI) {
+ if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "hardware shifter is not available\n");
+ t_gen_illegal_insn(dc);
+ return;
+ }
tcg_gen_sari_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
} else {
- TCGv t0 = tcg_temp_new();
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new();
tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
+
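+ /* Without the hardware shifter, only a run-time shift amount of 1 is legal; trap otherwise. */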
+ if (!(dc->features & LM32_FEATURE_SHIFT)) {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1);
+ t_gen_illegal_insn(dc);
+ tcg_gen_br(l2);
+ }
+
+ gen_set_label(l1);
tcg_gen_sar_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
+ gen_set_label(l2);
+
tcg_temp_free(t0);
}
}
LOG_DIS("sru r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
}
- if (!(dc->env->features & LM32_FEATURE_SHIFT)) {
- if (dc->format == OP_FMT_RI) {
- /* TODO: check r1 == 1 during runtime */
- } else {
- if (dc->imm5 != 1) {
- cpu_abort(dc->env, "hardware shifter is not available\n");
- }
- }
- }
-
if (dc->format == OP_FMT_RI) {
+ if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "hardware shifter is not available\n");
+ t_gen_illegal_insn(dc);
+ return;
+ }
tcg_gen_shri_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
} else {
- TCGv t0 = tcg_temp_new();
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new();
tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
+
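+ /* Without the hardware shifter, only a run-time shift amount of 1 is legal; trap otherwise. */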
+ if (!(dc->features & LM32_FEATURE_SHIFT)) {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1);
+ t_gen_illegal_insn(dc);
+ tcg_gen_br(l2);
+ }
+
+ gen_set_label(l1);
tcg_gen_shr_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
+ gen_set_label(l2);
+
tcg_temp_free(t0);
}
}
{
LOG_DIS("user");
- cpu_abort(dc->env, "user insn undefined\n");
+ qemu_log_mask(LOG_GUEST_ERROR, "user instruction undefined\n");
+ t_gen_illegal_insn(dc);
}
static void dec_wcsr(DisasContext *dc)
break;
case CSR_IM:
/* mark as an io operation because it could cause an interrupt */
- if (use_icount) {
+ if (dc->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]);
tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
- if (use_icount) {
+ if (dc->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
}
dc->is_jmp = DISAS_UPDATE;
break;
case CSR_IP:
/* mark as an io operation because it could cause an interrupt */
- if (use_icount) {
+ if (dc->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]);
tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
- if (use_icount) {
+ if (dc->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
}
dc->is_jmp = DISAS_UPDATE;
gen_helper_wcsr_jrx(cpu_env, cpu_R[dc->r1]);
break;
case CSR_DC:
- tcg_gen_mov_tl(cpu_dc, cpu_R[dc->r1]);
+ gen_helper_wcsr_dc(cpu_env, cpu_R[dc->r1]);
break;
case CSR_BP0:
case CSR_BP1:
case CSR_BP2:
case CSR_BP3:
no = dc->csr - CSR_BP0;
- if (dc->env->num_bps <= no) {
- cpu_abort(dc->env, "breakpoint #%i is not available\n", no);
+ if (dc->num_breakpoints <= no) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "breakpoint #%i is not available\n", no);
+ t_gen_illegal_insn(dc);
+ break;
}
- tcg_gen_mov_tl(cpu_bp[no], cpu_R[dc->r1]);
+ gen_helper_wcsr_bp(cpu_env, cpu_R[dc->r1], tcg_const_i32(no));
break;
case CSR_WP0:
case CSR_WP1:
case CSR_WP2:
case CSR_WP3:
no = dc->csr - CSR_WP0;
- if (dc->env->num_wps <= no) {
- cpu_abort(dc->env, "watchpoint #%i is not available\n", no);
+ if (dc->num_watchpoints <= no) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "watchpoint #%i is not available\n", no);
+ t_gen_illegal_insn(dc);
+ break;
}
- tcg_gen_mov_tl(cpu_wp[no], cpu_R[dc->r1]);
+ gen_helper_wcsr_wp(cpu_env, cpu_R[dc->r1], tcg_const_i32(no));
break;
case CSR_CC:
case CSR_CFG:
- cpu_abort(dc->env, "invalid write access csr=%x\n", dc->csr);
+ qemu_log_mask(LOG_GUEST_ERROR, "invalid write access csr=%x\n",
+ dc->csr);
break;
default:
- cpu_abort(dc->env, "write_csr unknown csr=%x\n", dc->csr);
+ qemu_log_mask(LOG_GUEST_ERROR, "write_csr: unknown csr=%x\n",
+ dc->csr);
break;
}
}
static void dec_ill(DisasContext *dc)
{
- cpu_abort(dc->env, "unknown opcode 0x%02x\n", dc->opcode);
+ qemu_log_mask(LOG_GUEST_ERROR, "invalid opcode 0x%02x\n", dc->opcode);
+ t_gen_illegal_insn(dc);
}
typedef void (*DecoderInfo)(DisasContext *dc);
static inline void decode(DisasContext *dc, uint32_t ir)
{
- if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
- tcg_gen_debug_insn_start(dc->pc);
- }
-
dc->ir = ir;
LOG_DIS("%8.8x\t", dc->ir);
- /* try guessing 'empty' instruction memory, although it may be a valid
- * instruction sequence (eg. srui r0, r0, 0) */
- if (dc->ir) {
- dc->nr_nops = 0;
- } else {
- LOG_DIS("nr_nops=%d\t", dc->nr_nops);
- dc->nr_nops++;
- if (dc->nr_nops > 4) {
- cpu_abort(dc->env, "fetching nop sequence\n");
- }
- }
-
dc->opcode = EXTRACT_FIELD(ir, 26, 31);
dc->imm5 = EXTRACT_FIELD(ir, 0, 4);
decinfo[dc->opcode](dc);
}
-static void check_breakpoint(CPULM32State *env, DisasContext *dc)
-{
- CPUBreakpoint *bp;
-
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
- if (bp->pc == dc->pc) {
- tcg_gen_movi_tl(cpu_pc, dc->pc);
- t_gen_raise_exception(dc, EXCP_DEBUG);
- dc->is_jmp = DISAS_UPDATE;
- }
- }
- }
-}
-
/* generate intermediate code for basic block 'tb'. */
-static void gen_intermediate_code_internal(CPULM32State *env,
- TranslationBlock *tb, int search_pc)
+static inline
+void gen_intermediate_code_internal(LM32CPU *cpu,
+ TranslationBlock *tb, bool search_pc)
{
+ CPUState *cs = CPU(cpu);
+ CPULM32State *env = &cpu->env;
struct DisasContext ctx, *dc = &ctx;
- uint16_t *gen_opc_end;
uint32_t pc_start;
int j, lj;
uint32_t next_page_start;
int max_insns;
pc_start = tb->pc;
- dc->env = env;
+ dc->features = cpu->features;
+ dc->num_breakpoints = cpu->num_breakpoints;
+ dc->num_watchpoints = cpu->num_watchpoints;
dc->tb = tb;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
-
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
- dc->singlestep_enabled = env->singlestep_enabled;
- dc->nr_nops = 0;
+ dc->singlestep_enabled = cs->singlestep_enabled;
if (pc_start & 3) {
- cpu_abort(env, "LM32: unaligned PC=%x\n", pc_start);
- }
-
- if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
- qemu_log("-----------------------------------------\n");
- log_cpu_state(env, 0);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "unaligned PC=%x. Ignoring lowest bits.\n", pc_start);
+ pc_start &= ~3;
}
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
- gen_tb_start();
+ gen_tb_start(tb);
do {
- check_breakpoint(env, dc);
-
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j) {
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
}
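+ /* Record this insn's PC; restore_state_to_opc() uses it to recover env->pc. */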
+ tcg_gen_insn_start(dc->pc);
+ num_insns++;
+
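+ /* Raise EXCP_DEBUG and stop translating if a QEMU breakpoint is set here. */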
+ if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ t_gen_raise_exception(dc, EXCP_DEBUG);
+ dc->is_jmp = DISAS_UPDATE;
+ break;
+ }
/* Pretty disas. */
LOG_DIS("%8.8x:\t", dc->pc);
- if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
}
decode(dc, cpu_ldl_code(env, dc->pc));
dc->pc += 4;
- num_insns++;
-
} while (!dc->is_jmp
- && tcg_ctx.gen_opc_ptr < gen_opc_end
- && !env->singlestep_enabled
+ && !tcg_op_buf_full()
+ && !cs->singlestep_enabled
&& !singlestep
&& (dc->pc < next_page_start)
&& num_insns < max_insns);
gen_io_end();
}
- if (unlikely(env->singlestep_enabled)) {
+ if (unlikely(cs->singlestep_enabled)) {
if (dc->is_jmp == DISAS_NEXT) {
tcg_gen_movi_tl(cpu_pc, dc->pc);
}
}
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
+
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j) {
tcg_ctx.gen_opc_instr_start[lj++] = 0;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
qemu_log("\n");
- log_target_disas(env, pc_start, dc->pc - pc_start, 0);
- qemu_log("\nisize=%d osize=%td\n",
- dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
- tcg_ctx.gen_opc_buf);
+ log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
+ qemu_log("\nisize=%d osize=%d\n",
+ dc->pc - pc_start, tcg_op_buf_count());
}
#endif
}
void gen_intermediate_code(CPULM32State *env, struct TranslationBlock *tb)
{
- gen_intermediate_code_internal(env, tb, 0);
+ gen_intermediate_code_internal(lm32_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc(CPULM32State *env, struct TranslationBlock *tb)
{
- gen_intermediate_code_internal(env, tb, 1);
+ gen_intermediate_code_internal(lm32_env_get_cpu(env), tb, true);
}
-void cpu_dump_state(CPULM32State *env, FILE *f, fprintf_function cpu_fprintf,
- int flags)
+void lm32_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
{
+ LM32CPU *cpu = LM32_CPU(cs);
+ CPULM32State *env = &cpu->env;
int i;
if (!env || !f) {
cpu_fprintf(f, "\n\n");
}
-void restore_state_to_opc(CPULM32State *env, TranslationBlock *tb, int pc_pos)
+void restore_state_to_opc(CPULM32State *env, TranslationBlock *tb,
+ target_ulong *data)
{
- env->pc = tcg_ctx.gen_opc_pc[pc_pos];
+ env->pc = data[0];
}
void lm32_translate_init(void)