#include <inttypes.h>
#include "cpu.h"
-#include "disas.h"
+#include "disas/disas.h"
#include "tcg-op.h"
-#include "qemu-log.h"
+#include "qemu/log.h"
#include "helper.h"
#define GEN_HELPER 1
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
static const char *regnames[] = {
"r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
offsetof(CPUUniCore32State, regs[i]), regnames[i]);
}
-
-#define GEN_HELPER 2
-#include "helper.h"
}
static int num_temps;
dead_tmp(tmp);
}
-/* FIXME: Most targets have native widening multiplication.
- It would be good to use that instead of a full wide multiply. */
-/* 32x32->64 multiply. Marks inputs as dead. */
-static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
-{
- TCGv_i64 tmp1 = tcg_temp_new_i64();
- TCGv_i64 tmp2 = tcg_temp_new_i64();
-
- tcg_gen_extu_i32_i64(tmp1, a);
- dead_tmp(a);
- tcg_gen_extu_i32_i64(tmp2, b);
- dead_tmp(b);
- tcg_gen_mul_i64(tmp1, tmp1, tmp2);
- tcg_temp_free_i64(tmp2);
- return tmp1;
-}
-
-static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
-{
- TCGv_i64 tmp1 = tcg_temp_new_i64();
- TCGv_i64 tmp2 = tcg_temp_new_i64();
-
- tcg_gen_ext_i32_i64(tmp1, a);
- dead_tmp(a);
- tcg_gen_ext_i32_i64(tmp2, b);
- dead_tmp(b);
- tcg_gen_mul_i64(tmp1, tmp1, tmp2);
- tcg_temp_free_i64(tmp2);
- return tmp1;
-}
-
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF))
/* Set CF to the top bit of var. */
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
tcg_gen_goto_tb(n);
gen_set_pc_im(dest);
- tcg_gen_exit_tb((tcg_target_long)tb + n);
+ tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
gen_set_pc_im(dest);
tcg_gen_exit_tb(0);
}
}
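
The cast change above matters because the value handed to tcg_gen_exit_tb() must round-trip a host pointer through an integer, which uintptr_t guarantees; tcg_target_long is only the TCG backend's register width and need not match the host pointer size. A small self-contained sketch of the pointer-plus-slot-index encoding that makes "(uintptr_t)tb + n" work (illustrative types only, not QEMU code):

    #include <stdint.h>
    #include <assert.h>

    struct tb { void *jmp_first; };   /* stand-in for TranslationBlock */

    int main(void)
    {
        static struct tb some_tb;
        unsigned n = 1;                              /* goto_tb slot index (0 or 1) */
        /* The TB's address has its low two bits clear, so they can carry n. */
        uintptr_t packed = (uintptr_t)&some_tb + n;  /* value given to exit_tb */
        struct tb *unpacked = (struct tb *)(packed & ~(uintptr_t)3);
        assert(unpacked == &some_tb && (packed & 3) == n);
        return 0;
    }
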
-
-/* Store a 64-bit value to a register pair. Clobbers val. */
-static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
-{
- TCGv tmp;
- tmp = new_tmp();
- tcg_gen_trunc_i64_i32(tmp, val);
- store_reg(s, rlow, tmp);
- tmp = new_tmp();
- tcg_gen_shri_i64(val, val, 32);
- tcg_gen_trunc_i64_i32(tmp, val);
- store_reg(s, rhigh, tmp);
-}
-
-/* load and add a 64-bit value from a register pair. */
-static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
-{
- TCGv_i64 tmp;
- TCGv tmpl;
- TCGv tmph;
-
- /* Load 64-bit value rd:rn. */
- tmpl = load_reg(s, rlow);
- tmph = load_reg(s, rhigh);
- tmp = tcg_temp_new_i64();
- tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
- dead_tmp(tmpl);
- dead_tmp(tmph);
- tcg_gen_add_i64(val, val, tmp);
- tcg_temp_free_i64(tmp);
-}
-
/* data processing instructions */
static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
/* multiply */
static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
- TCGv tmp;
- TCGv tmp2;
- TCGv_i64 tmp64;
+ TCGv tmp, tmp2, tmp3, tmp4;
if (UCOP_SET(27)) {
/* 64 bit mul */
tmp = load_reg(s, UCOP_REG_M);
tmp2 = load_reg(s, UCOP_REG_N);
if (UCOP_SET(26)) {
- tmp64 = gen_muls_i64_i32(tmp, tmp2);
+ tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
} else {
- tmp64 = gen_mulu_i64_i32(tmp, tmp2);
+ tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
}
if (UCOP_SET(25)) { /* mult accumulate */
- gen_addq(s, tmp64, UCOP_REG_LO, UCOP_REG_HI);
- }
- gen_storeq_reg(s, UCOP_REG_LO, UCOP_REG_HI, tmp64);
- tcg_temp_free_i64(tmp64);
+ tmp3 = load_reg(s, UCOP_REG_LO);
+ tmp4 = load_reg(s, UCOP_REG_HI);
+ tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, tmp3, tmp4);
+ dead_tmp(tmp3);
+ dead_tmp(tmp4);
+ }
+ store_reg(s, UCOP_REG_LO, tmp);
+ store_reg(s, UCOP_REG_HI, tmp2);
} else {
/* 32 bit mul */
tmp = load_reg(s, UCOP_REG_M);
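
The two-output TCG multiply ops used above produce the same 64-bit results the removed gen_mulu_i64_i32()/gen_muls_i64_i32() helpers built via i64 temporaries, just split across a lo/hi register pair, with tcg_gen_add2_i32() providing the 64-bit accumulate. A minimal plain-C sketch of their semantics (reference code only, not part of the patch; the ref_* names are illustrative):

    #include <stdint.h>

    /* Unsigned 32x32->64 multiply split into halves:
     * what tcg_gen_mulu2_i32(lo, hi, a, b) computes. */
    static void ref_mulu2_i32(uint32_t *lo, uint32_t *hi, uint32_t a, uint32_t b)
    {
        uint64_t p = (uint64_t)a * b;
        *lo = (uint32_t)p;
        *hi = (uint32_t)(p >> 32);
    }

    /* Signed variant, matching tcg_gen_muls2_i32(). */
    static void ref_muls2_i32(uint32_t *lo, uint32_t *hi, uint32_t a, uint32_t b)
    {
        int64_t p = (int64_t)(int32_t)a * (int32_t)b;
        *lo = (uint32_t)p;
        *hi = (uint32_t)((uint64_t)p >> 32);
    }

    /* 64-bit add across two lo/hi pairs, matching
     * tcg_gen_add2_i32(rl, rh, al, ah, bl, bh). */
    static void ref_add2_i32(uint32_t *rl, uint32_t *rh,
                             uint32_t al, uint32_t ah, uint32_t bl, uint32_t bh)
    {
        uint64_t s = (((uint64_t)ah << 32) | al) + (((uint64_t)bh << 32) | bl);
        *rl = (uint32_t)s;
        *rh = (uint32_t)(s >> 32);
    }
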
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
basic block 'tb'. If search_pc is TRUE, also generate PC
information for each intermediate instruction. */
-static inline void gen_intermediate_code_internal(CPUUniCore32State *env,
- TranslationBlock *tb, int search_pc)
+static inline void gen_intermediate_code_internal(UniCore32CPU *cpu,
+ TranslationBlock *tb, bool search_pc)
{
+ CPUState *cs = CPU(cpu);
+ CPUUniCore32State *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
CPUBreakpoint *bp;
uint16_t *gen_opc_end;
dc->tb = tb;
- gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
+ gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
- dc->singlestep_enabled = env->singlestep_enabled;
+ dc->singlestep_enabled = cs->singlestep_enabled;
dc->condjmp = 0;
cpu_F0s = tcg_temp_new_i32();
cpu_F1s = tcg_temp_new_i32();
}
#endif
- gen_icount_start();
+ gen_tb_start();
do {
if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
invalidate this TB. */
dc->pc += 2; /* FIXME */
goto done_generating;
- break;
}
}
}
if (search_pc) {
- j = gen_opc_ptr - gen_opc_buf;
+ j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
if (lj < j) {
lj++;
while (lj < j) {
- gen_opc_instr_start[lj++] = 0;
+ tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
}
- gen_opc_pc[lj] = dc->pc;
- gen_opc_instr_start[lj] = 1;
- gen_opc_icount[lj] = num_insns;
+ tcg_ctx.gen_opc_pc[lj] = dc->pc;
+ tcg_ctx.gen_opc_instr_start[lj] = 1;
+ tcg_ctx.gen_opc_icount[lj] = num_insns;
}
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
* Also stop translation when a page boundary is reached. This
* ensures prefetch aborts occur at the right place. */
num_insns++;
- } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
- !env->singlestep_enabled &&
+ } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
+ !cs->singlestep_enabled &&
!singlestep &&
dc->pc < next_page_start &&
num_insns < max_insns);
/* At this stage dc->condjmp will only be set when the skipped
instruction was a conditional branch or trap, and the PC has
already been written. */
- if (unlikely(env->singlestep_enabled)) {
+ if (unlikely(cs->singlestep_enabled)) {
/* Make sure the pc is updated, and raise a debug exception. */
if (dc->condjmp) {
if (dc->is_jmp == DISAS_SYSCALL) {
}
done_generating:
- gen_icount_end(tb, num_insns);
- *gen_opc_ptr = INDEX_op_end;
+ gen_tb_end(tb, num_insns);
+ *tcg_ctx.gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
}
#endif
if (search_pc) {
- j = gen_opc_ptr - gen_opc_buf;
+ j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
lj++;
while (lj <= j) {
- gen_opc_instr_start[lj++] = 0;
+ tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
} else {
tb->size = dc->pc - pc_start;
void gen_intermediate_code(CPUUniCore32State *env, TranslationBlock *tb)
{
- gen_intermediate_code_internal(env, tb, 0);
+ gen_intermediate_code_internal(uc32_env_get_cpu(env), tb, false);
}
void gen_intermediate_code_pc(CPUUniCore32State *env, TranslationBlock *tb)
{
- gen_intermediate_code_internal(env, tb, 1);
+ gen_intermediate_code_internal(uc32_env_get_cpu(env), tb, true);
}
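
The public entry points keep their CPUUniCore32State parameter, so they first recover the QOM CPU object that embeds the env before calling the internal helper. uc32_env_get_cpu() is presumably the usual container_of-style accessor; a stand-alone illustration with dummy types (not the real cpu-qom.h):

    #include <stddef.h>
    #include <assert.h>

    typedef struct { unsigned regs[32]; } CPUUniCore32State;             /* dummy */
    typedef struct { int parent; CPUUniCore32State env; } UniCore32CPU;  /* dummy */

    static UniCore32CPU *uc32_env_get_cpu(CPUUniCore32State *env)
    {
        /* env is embedded in UniCore32CPU, so step back to the container. */
        return (UniCore32CPU *)((char *)env - offsetof(UniCore32CPU, env));
    }

    int main(void)
    {
        UniCore32CPU cpu;
        assert(uc32_env_get_cpu(&cpu.env) == &cpu);
        return 0;
    }
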
static const char *cpu_mode_names[16] = {
#define cpu_dump_state_ucf64(env, file, pr, flags) do { } while (0)
#endif
-void cpu_dump_state(CPUUniCore32State *env, FILE *f,
- fprintf_function cpu_fprintf, int flags)
+void uc32_cpu_dump_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags)
{
+ UniCore32CPU *cpu = UNICORE32_CPU(cs);
+ CPUUniCore32State *env = &cpu->env;
int i;
uint32_t psr;
void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb, int pc_pos)
{
- env->regs[31] = gen_opc_pc[pc_pos];
+ env->regs[31] = tcg_ctx.gen_opc_pc[pc_pos];
}
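
For context, restore_state_to_opc() is the consumer of the per-op arrays filled in when search_pc is true: after a fault inside a TB, the block is retranslated via gen_intermediate_code_pc() and the guest PC recorded for the faulting op is written back into regs[31], the UniCore32 PC. A simplified sketch of that path, based on the generic retranslation logic of this era (not verbatim QEMU code):

    /* j = index of the TCG op corresponding to the faulting host PC,
     * found by scanning the retranslated opcode stream. */
    while (tcg_ctx.gen_opc_instr_start[j] == 0) {
        j--;                            /* back up to an instruction boundary */
    }
    restore_state_to_opc(env, tb, j);   /* env->regs[31] = gen_opc_pc[j] */
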