* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <stdio.h>
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
+#include "exec/log.h"
typedef struct DisasContext {
unsigned cpenable;
} DisasContext;
-static TCGv_ptr cpu_env;
+static TCGv_env cpu_env;
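/*
 * TCGv_env is the TCG type dedicated to the pointer to the CPU state
 * structure; switching cpu_env from the generic TCGv_ptr to TCGv_env
 * presumably lets the TCG headers type-check uses of the env pointer
 * more strictly.
 */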
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_FR[16];
int i;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
+ tcg_ctx.tcg_env = cpu_env;
+ cpu_pc = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUXtensaState, pc), "pc");
for (i = 0; i < 16; i++) {
- cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
+ cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUXtensaState, regs[i]),
regnames[i]);
}
for (i = 0; i < 16; i++) {
- cpu_FR[i] = tcg_global_mem_new_i32(TCG_AREG0,
+ cpu_FR[i] = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUXtensaState, fregs[i].f32[FP_F32_LOW]),
fregnames[i]);
}
for (i = 0; i < 256; ++i) {
if (sregnames[i].name) {
- cpu_SR[i] = tcg_global_mem_new_i32(TCG_AREG0,
+ cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUXtensaState, sregs[i]),
sregnames[i].name);
}
for (i = 0; i < 256; ++i) {
if (uregnames[i].name) {
- cpu_UR[i] = tcg_global_mem_new_i32(TCG_AREG0,
+ cpu_UR[i] = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUXtensaState, uregs[i]),
uregnames[i].name);
}
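/*
 * Every loop above now creates its TCG globals relative to the cpu_env
 * pointer rather than the fixed TCG_AREG0 register; restating the calls
 * above (not new code), the pattern is:
 *
 *     cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
 *                                        offsetof(CPUXtensaState, sregs[i]),
 *                                        sregnames[i].name);
 *
 * cpu_env itself is still created from TCG_AREG0 and is additionally
 * published to common code through tcg_ctx.tcg_env above.
 */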
static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
{
TCGv_i32 tmp = tcg_const_i32(dest);
+#ifndef CONFIG_USER_ONLY
if (((dc->tb->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
slot = -1;
}
+#endif
gen_jump_slot(dc, tmp, slot);
tcg_temp_free(tmp);
}
static void gen_callwi(DisasContext *dc, int callinc, uint32_t dest, int slot)
{
TCGv_i32 tmp = tcg_const_i32(dest);
+#ifndef CONFIG_USER_ONLY
if (((dc->tb->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
slot = -1;
}
+#endif
gen_callw_slot(dc, callinc, tmp, slot);
tcg_temp_free(tmp);
}
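/*
 * In both gen_jumpi() and gen_callwi() above, forcing slot to -1 for a
 * target on a different guest page presumably makes gen_jump_slot() and
 * gen_callw_slot() fall back to an indirect jump instead of goto_tb
 * chaining. The new #ifndef CONFIG_USER_ONLY guard skips that check for
 * user-mode emulation, where, as I understand it, cross-page direct
 * chaining is safe because there is no guest MMU whose remappings could
 * invalidate it.
 */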
{
if (!xtensa_option_bits_enabled(dc->config, sregnames[sr].opt_bits)) {
if (sregnames[sr].name) {
- qemu_log("SR %s is not configured\n", sregnames[sr].name);
+ qemu_log_mask(LOG_GUEST_ERROR, "SR %s is not configured\n", sregnames[sr].name);
} else {
- qemu_log("SR %d is not implemented\n", sr);
+ qemu_log_mask(LOG_UNIMP, "SR %d is not implemented\n", sr);
}
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
return false;
[SR_X] = "xsr",
};
assert(access < ARRAY_SIZE(access_text) && access_text[access]);
- qemu_log("SR %s is not available for %s\n", sregnames[sr].name,
- access_text[access]);
+ qemu_log_mask(LOG_GUEST_ERROR, "SR %s is not available for %s\n", sregnames[sr].name,
+ access_text[access]);
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
return false;
}
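/*
 * The qemu_log() -> qemu_log_mask() conversions in this file pick the
 * category by blame: LOG_GUEST_ERROR for things the guest did wrong
 * (e.g. touching an SR that is not configured), LOG_UNIMP for things
 * QEMU itself does not implement. Either way the message is only
 * printed when the matching -d category is enabled, e.g.:
 *
 *     qemu-system-xtensa -d guest_errors,unimp ...
 */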
{
#define HAS_OPTION_BITS(opt) do { \
if (!option_bits_enabled(dc, opt)) { \
- qemu_log("Option is not enabled %s:%d\n", \
- __FILE__, __LINE__); \
+ qemu_log_mask(LOG_GUEST_ERROR, "Option is not enabled %s:%d\n", \
+ __FILE__, __LINE__); \
goto invalid_opcode; \
} \
} while (0)
#define HAS_OPTION(opt) HAS_OPTION_BITS(XTENSA_OPTION_BIT(opt))
-#define TBD() qemu_log("TBD(pc = %08x): %s:%d\n", dc->pc, __FILE__, __LINE__)
+#define TBD() qemu_log_mask(LOG_UNIMP, "TBD(pc = %08x): %s:%d\n", \
+                            dc->pc, __FILE__, __LINE__)
#define RESERVED() do { \
- qemu_log("RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \
- dc->pc, b0, b1, b2, __FILE__, __LINE__); \
+ qemu_log_mask(LOG_GUEST_ERROR, "RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \
+ dc->pc, b0, b1, b2, __FILE__, __LINE__); \
goto invalid_opcode; \
} while (0)
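/*
 * These helper macros are used throughout disas_xtensa_insn():
 * HAS_OPTION_BITS()/HAS_OPTION() jump to the invalid_opcode label (which
 * raises ILLEGAL_INSTRUCTION_CAUSE, see below) when a required Xtensa
 * configuration option is absent, TBD() marks decode paths that are not
 * implemented yet, and RESERVED() reports encodings left reserved by the
 * ISA. The do { ... } while (0) wrappers keep them safe to use as single
 * statements in if/else bodies.
 */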
gen_jump(dc, cpu_SR[EPC1 + RRR_S - 1]);
}
} else {
- qemu_log("RFI %d is illegal\n", RRR_S);
+ qemu_log_mask(LOG_GUEST_ERROR, "RFI %d is illegal\n", RRR_S);
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
}
break;
gen_helper_simcall(cpu_env);
}
} else {
- qemu_log("SIMCALL but semihosting is disabled\n");
+ qemu_log_mask(LOG_GUEST_ERROR, "SIMCALL but semihosting is disabled\n");
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
}
break;
if (uregnames[st].name) {
tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]);
} else {
- qemu_log("RUR %d not implemented, ", st);
+ qemu_log_mask(LOG_UNIMP, "RUR %d not implemented, ", st);
TBD();
}
}
if (uregnames[RSR_SR].name) {
gen_wur(RSR_SR, cpu_R[RRR_T]);
} else {
- qemu_log("WUR %d not implemented, ", RSR_SR);
+ qemu_log_mask(LOG_UNIMP, "WUR %d not implemented, ", RSR_SR);
TBD();
}
}
}
break;
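/*
 * The case added below stores cpu_R[RRI4_T] to the address
 * cpu_R[RRI4_S] + (RRI4_IMM4 << 2) at the current ring (dc->cring),
 * after a 4-byte alignment check; the "2" passed to
 * gen_load_store_alignment() is presumably log2 of the access size.
 * gen_window_check2() guards the two windowed register operands.
 */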
+ case 5: /*S32N*/
+ if (gen_window_check2(dc, RRI4_S, RRI4_T)) {
+ TCGv_i32 addr = tcg_temp_new_i32();
+
+ tcg_gen_addi_i32(addr, cpu_R[RRI4_S], RRI4_IMM4 << 2);
+ gen_load_store_alignment(dc, 2, addr, false);
+ tcg_gen_qemu_st32(cpu_R[RRI4_T], addr, dc->cring);
+ tcg_temp_free(addr);
+ }
+ break;
+
default:
RESERVED();
break;
return;
invalid_opcode:
- qemu_log("INVALID(pc = %08x)\n", dc->pc);
+ qemu_log_mask(LOG_GUEST_ERROR, "INVALID(pc = %08x)\n", dc->pc);
gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
#undef HAS_OPTION
}
tcg_gen_movi_i32(cpu_pc, dc.pc);
gen_exception(&dc, EXCP_DEBUG);
dc.is_jmp = DISAS_UPDATE;
+ /* The address covered by the breakpoint must be included in
+    [tb->pc, tb->pc + tb->size) in order for it to be
+    properly cleared -- thus we increment the PC here so that
+    the logic setting tb->size below does the right thing. */
+ dc.pc += 2;
break;
}
gen_tb_end(tb, insn_count);
#ifdef DEBUG_DISAS
- if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
qemu_log("----------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, dc.pc - pc_start, 0);
qemu_log("\n");
+ qemu_log_unlock();
}
#endif
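/*
 * The disassembly dump above is now additionally gated on
 * qemu_log_in_addr_range(), i.e. it honors the -dfilter address ranges,
 * and is wrapped in qemu_log_lock()/qemu_log_unlock() so that the
 * multi-line output is not interleaved with log output from other
 * threads.
 */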
tb->size = dc.pc - pc_start;