}
}
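+/*
+ * Validate an exclusive access to vaddr: translate the address (raising
+ * the returned exception cause on failure) and test the two-bit ATOMCTL
+ * field that corresponds to the page's cache attribute.  A zero field
+ * raises EXCLUSIVE_ERROR_CAUSE; isolate pages raise LOAD_STORE_ERROR_CAUSE.
+ */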
+void HELPER(check_exclusive)(CPUXtensaState *env, uint32_t pc, uint32_t vaddr,
+                             uint32_t is_write)
+{
+    uint32_t paddr, page_size, access;
+    uint32_t atomctl = env->sregs[ATOMCTL];
+    int rc = xtensa_get_physical_addr(env, true, vaddr, is_write,
+                                      xtensa_get_cring(env), &paddr,
+                                      &page_size, &access);
+
+    if (rc) {
+        HELPER(exception_cause_vaddr)(env, pc, rc, vaddr);
+    }
+
+    /* When data cache is not configured use ATOMCTL bypass field. */
+    if (!xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
+        access = PAGE_CACHE_BYPASS;
+    }
+
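+    /*
+     * Select the two-bit ATOMCTL field for this page: the fall-through
+     * shifts leave bits 1:0 for bypass, 3:2 for write-through and 5:4
+     * for write-back pages.
+     */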
+    switch (access & PAGE_CACHE_MASK) {
+    case PAGE_CACHE_WB:
+        atomctl >>= 2;
+        /* fall through */
+    case PAGE_CACHE_WT:
+        atomctl >>= 2;
+        /* fall through */
+    case PAGE_CACHE_BYPASS:
+        if ((atomctl & 0x3) == 0) {
+            HELPER(exception_cause_vaddr)(env, pc,
+                                          EXCLUSIVE_ERROR_CAUSE, vaddr);
+        }
+        break;
+
+    case PAGE_CACHE_ISOLATE:
+        HELPER(exception_cause_vaddr)(env, pc,
+                                      LOAD_STORE_ERROR_CAUSE, vaddr);
+        break;
+
+    default:
+        break;
+    }
+}
+
void HELPER(wsr_memctl)(CPUXtensaState *env, uint32_t v)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_ICACHE)) {
static TCGv_i32 cpu_SR[256];
static TCGv_i32 cpu_UR[256];
static TCGv_i32 cpu_windowbase_next;
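+/* Exclusive monitor: address armed by L32EX and the value loaded there. */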
+static TCGv_i32 cpu_exclusive_addr;
+static TCGv_i32 cpu_exclusive_val;
static GHashTable *xtensa_regfile_table;
        tcg_global_mem_new_i32(cpu_env,
                               offsetof(CPUXtensaState, windowbase_next),
                               "windowbase_next");
+    cpu_exclusive_addr =
+        tcg_global_mem_new_i32(cpu_env,
+                               offsetof(CPUXtensaState, exclusive_addr),
+                               "exclusive_addr");
+    cpu_exclusive_val =
+        tcg_global_mem_new_i32(cpu_env,
+                               offsetof(CPUXtensaState, exclusive_val),
+                               "exclusive_val");
}
void **xtensa_get_regfile_by_name(const char *name)
    tcg_gen_andi_i32(cpu_UR[EXPSTATE], cpu_UR[EXPSTATE], ~(1u << arg[0].imm));
}
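+/* CLREX: drop any address armed by a previous L32EX. */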
+static void translate_clrex(DisasContext *dc, const OpcodeArg arg[],
+                            const uint32_t par[])
+{
+    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
+}
+
static void translate_const16(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
    tcg_temp_free(tmp);
}
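+/*
+ * GETEX: deposit bit 0 of the operand into ATOMCTL bit 8 (the S32EX
+ * status bit) and return the previous status value in the register.
+ */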
+static void translate_getex(DisasContext *dc, const OpcodeArg arg[],
+                            const uint32_t par[])
+{
+    TCGv_i32 tmp = tcg_temp_new_i32();
+
+    tcg_gen_extract_i32(tmp, cpu_SR[ATOMCTL], 8, 1);
+    tcg_gen_deposit_i32(cpu_SR[ATOMCTL], cpu_SR[ATOMCTL], arg[0].in, 8, 1);
+    tcg_gen_mov_i32(arg[0].out, tmp);
+    tcg_temp_free(tmp);
+}
+
static void translate_icache(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
    tcg_temp_free(addr);
}
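+/*
+ * In system emulation, exclusive accesses on non-MPU configurations are
+ * validated against ATOMCTL by the check_exclusive helper; the user-only
+ * stub is a no-op.
+ */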
+#ifdef CONFIG_USER_ONLY
+static void gen_check_exclusive(DisasContext *dc, TCGv_i32 addr, bool is_write)
+{
+}
+#else
+static void gen_check_exclusive(DisasContext *dc, TCGv_i32 addr, bool is_write)
+{
+    if (!option_enabled(dc, XTENSA_OPTION_MPU)) {
+        TCGv_i32 tpc = tcg_const_i32(dc->pc);
+        TCGv_i32 write = tcg_const_i32(is_write);
+
+        gen_helper_check_exclusive(cpu_env, tpc, addr, write);
+        tcg_temp_free(tpc);
+        tcg_temp_free(write);
+    }
+}
+#endif
+
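+/*
+ * L32EX: 32-bit exclusive load; arms the monitor with the (aligned)
+ * address and the value just loaded.
+ */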
+static void translate_l32ex(DisasContext *dc, const OpcodeArg arg[],
+                            const uint32_t par[])
+{
+    TCGv_i32 addr = tcg_temp_new_i32();
+
+    tcg_gen_mov_i32(addr, arg[1].in);
+    gen_load_store_alignment(dc, 2, addr, true);
+    gen_check_exclusive(dc, addr, false);
+    tcg_gen_qemu_ld_i32(arg[0].out, addr, dc->ring, MO_TEUL);
+    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
+    tcg_gen_mov_i32(cpu_exclusive_val, arg[0].out);
+    tcg_temp_free(addr);
+}
+
static void translate_ldst(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_temp_free(addr);
}
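+/*
+ * S32EX: conditional 32-bit store.  The cmpxchg stores the new value only
+ * if the address matches the armed exclusive address and memory still
+ * holds the remembered value.  The previous ATOMCTL bit 8 is returned in
+ * the register and the result of this store is deposited there.
+ */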
+static void translate_s32ex(DisasContext *dc, const OpcodeArg arg[],
+                            const uint32_t par[])
+{
+    TCGv_i32 prev = tcg_temp_new_i32();
+    TCGv_i32 addr = tcg_temp_local_new_i32();
+    TCGv_i32 res = tcg_temp_local_new_i32();
+    TCGLabel *label = gen_new_label();
+
+    tcg_gen_movi_i32(res, 0);
+    tcg_gen_mov_i32(addr, arg[1].in);
+    gen_load_store_alignment(dc, 2, addr, true);
+    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, label);
+    gen_check_exclusive(dc, addr, true);
+    tcg_gen_atomic_cmpxchg_i32(prev, cpu_exclusive_addr, cpu_exclusive_val,
+                               arg[0].in, dc->cring, MO_TEUL);
+    tcg_gen_setcond_i32(TCG_COND_EQ, res, prev, cpu_exclusive_val);
+    tcg_gen_movcond_i32(TCG_COND_EQ, cpu_exclusive_val,
+                        prev, cpu_exclusive_val, prev, cpu_exclusive_val);
+    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
+    gen_set_label(label);
+    tcg_gen_extract_i32(arg[0].out, cpu_SR[ATOMCTL], 8, 1);
+    tcg_gen_deposit_i32(cpu_SR[ATOMCTL], cpu_SR[ATOMCTL], res, 8, 1);
+    tcg_temp_free(prev);
+    tcg_temp_free(addr);
+    tcg_temp_free(res);
+}
+
static void translate_salt(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    }, {
        .name = "clrb_expstate",
        .translate = translate_clrb_expstate,
+    }, {
+        .name = "clrex",
+        .translate = translate_clrex,
    }, {
        .name = "const16",
        .translate = translate_const16,
    }, {
        .name = "extw",
        .translate = translate_memw,
+    }, {
+        .name = "getex",
+        .translate = translate_getex,
    }, {
        .name = "hwwdtlba",
        .op_flags = XTENSA_OP_ILL,
        .name = "l32e",
        .translate = translate_l32e,
        .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_LOAD,
+    }, {
+        .name = "l32ex",
+        .translate = translate_l32ex,
+        .op_flags = XTENSA_OP_LOAD,
    }, {
        .name = (const char * const[]) {
            "l32i", "l32i.n", NULL,
        .name = "s32e",
        .translate = translate_s32e,
        .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_STORE,
+    }, {
+        .name = "s32ex",
+        .translate = translate_s32ex,
+        .op_flags = XTENSA_OP_LOAD | XTENSA_OP_STORE,
    }, {
        .name = (const char * const[]) {
            "s32i", "s32i.n", "s32nb", NULL,