CPUState *cpu = ENV_GET_CPU(env1);
page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- mmu_idx = cpu_mmu_index(env1);
+ mmu_idx = cpu_mmu_index(env1, true);
if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
(addr & TARGET_PAGE_MASK))) {
cpu_ldub_code(env1, addr);
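This hunk is on the instruction-fetch path (the code-TLB lookup that
refills via cpu_ldub_code()), which is why it passes true for the new
ifetch argument. The convention the patch establishes, as a minimal
illustrative sketch (not itself a hunk from the change):

    /* Illustrative only: how callers pick the new ifetch flag. */
    int code_idx = cpu_mmu_index(env, true);   /* instruction fetch */
    int data_idx = cpu_mmu_index(env, false);  /* data load/store */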
#endif /* (NB_MMU_MODES > 12) */
/* these accesses are slower; they must be as rare as possible */
-#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define CPU_MMU_INDEX (cpu_mmu_index(env, false))
#define MEMSUFFIX _data
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
-#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define CPU_MMU_INDEX (cpu_mmu_index(env, true))
#define MEMSUFFIX _code
#define SOFTMMU_CODE_ACCESS
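Including exec/cpu_ldst_template.h once per CPU_MMU_INDEX definition
above is what routes the _data accessors through cpu_mmu_index(env,
false) and the _code accessors through cpu_mmu_index(env, true).
Roughly, and skipping the TLB fast path and return-address plumbing the
real template generates, the expansion looks like this simplified
sketch:

    /* Simplified sketch of the accessors stamped out for DATA_SIZE 1;
     * the real template also inlines the TLB hit case. */
    static inline int cpu_ldub_data(CPUArchState *env, target_ulong ptr)
    {
        TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, false));
        return helper_ret_ldub_mmu(env, ptr, oi, 0);
    }

    static inline int cpu_ldub_code(CPUArchState *env, target_ulong ptr)
    {
        TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
        return helper_ret_ldub_mmu(env, ptr, oi, 0);
    }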
PS_USER_MODE = 8
};
-static inline int cpu_mmu_index(CPUAlphaState *env)
+static inline int cpu_mmu_index(CPUAlphaState *env, bool ifetch)
{
if (env->pal_mode) {
return MMU_KERNEL_IDX;
ctx.tb = tb;
ctx.pc = pc_start;
- ctx.mem_idx = cpu_mmu_index(env);
+ ctx.mem_idx = cpu_mmu_index(env, false);
ctx.implver = env->implver;
ctx.singlestep_enabled = cs->singlestep_enabled;
}
/* Determine the current mmu_idx to use for normal loads/stores */
-static inline int cpu_mmu_index(CPUARMState *env)
+static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
int el = arm_current_el(env);
<< ARM_TBFLAG_XSCALE_CPAR_SHIFT);
}
- *flags |= (cpu_mmu_index(env) << ARM_TBFLAG_MMUIDX_SHIFT);
+ *flags |= (cpu_mmu_index(env, false) << ARM_TBFLAG_MMUIDX_SHIFT);
/* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
* states defined in the ARM ARM for software singlestep:
* SS_ACTIVE PSTATE.SS State
uint32_t fsr;
MemTxAttrs attrs = {};
- ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env), &phys_addr,
+ ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env, false), &phys_addr,
&attrs, &prot, &page_size, &fsr);
if (ret) {
int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
void *hostaddr[maxidx];
int try, i;
- unsigned mmu_idx = cpu_mmu_index(env);
+ unsigned mmu_idx = cpu_mmu_index(env, false);
TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
for (try = 0; try < 2; try++) {
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUCRISState *env)
+static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch)
{
return !!(env->pregs[PR_CCS] & U_FLAG);
}
static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
{
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
/* If we get a fault on a delayslot we must keep the jmp state in
the cpu-state to be able to re-execute the jmp. */
static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
unsigned int size, int sign)
{
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
/* If we get a fault on a delayslot we must keep the jmp state in
the cpu-state to be able to re-execute the jmp. */
static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
unsigned int size)
{
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
/* If we get a fault on a delayslot we must keep the jmp state in
the cpu-state to be able to re-execute the jmp. */
static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val,
unsigned int size)
{
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
/* If we get a fault on a delayslot we must keep the jmp state in
the cpu-state to be able to re-execute the jmp. */
#define MMU_KSMAP_IDX 0
#define MMU_USER_IDX 1
#define MMU_KNOSMAP_IDX 2
-static inline int cpu_mmu_index(CPUX86State *env)
+static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
{
return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
(!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
/* select memory access functions */
dc->mem_index = 0;
if (flags & HF_SOFTMMU_MASK) {
- dc->mem_index = cpu_mmu_index(env);
+ dc->mem_index = cpu_mmu_index(env, false);
}
dc->cpuid_features = env->features[FEAT_1_EDX];
dc->cpuid_ext_features = env->features[FEAT_1_ECX];
#define NB_MMU_MODES 1
#define TARGET_PAGE_BITS 12
-static inline int cpu_mmu_index(CPULM32State *env)
+static inline int cpu_mmu_index(CPULM32State *env, bool ifetch)
{
return 0;
}
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUM68KState *env)
+static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch)
{
return (env->sr & SR_S) == 0 ? 1 : 0;
}
#define MMU_USER_IDX 2
/* See NB_MMU_MODES further up the file. */
-static inline int cpu_mmu_index (CPUMBState *env)
+static inline int cpu_mmu_index (CPUMBState *env, bool ifetch)
{
/* Are we in nommu mode? */
if (!(env->sregs[SR_MSR] & MSR_VM))
}
hit = mmu_translate(&env->mmu, &lu,
- v & TLB_EPN_MASK, 0, cpu_mmu_index(env));
+ v & TLB_EPN_MASK, 0, cpu_mmu_index(env, false));
if (hit) {
env->mmu.regs[MMU_R_TLBX] = lu.idx;
} else
CPUState *cs = CPU(dc->cpu);
TCGv t0, t1;
unsigned int sr, to, rn;
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
sr = dc->imm & ((1 << 14) - 1);
to = dc->imm & (1 << 14);
CPUState *cs = CPU(dc->cpu);
TCGv t0;
unsigned int op;
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
op = dc->ir & ((1 << 9) - 1);
switch (op) {
* address and if that succeeds we write into the destination reg.
*/
v = tcg_temp_new();
- tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);
+ tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
this compare and the following write to be atomic. For user
emulation we need to add atomicity between threads. */
tval = tcg_temp_new();
- tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
+ tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
MO_TEUL);
tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
write_carryi(dc, 0);
break;
}
}
- tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env), mop);
+ tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr,
+                    cpu_mmu_index(&dc->cpu->env, false), mop);
/* Verify alignment if needed. */
if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
static void dec_br(DisasContext *dc)
{
unsigned int dslot, link, abs, mbar;
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
dslot = dc->ir & (1 << 20);
abs = dc->ir & (1 << 19);
static void dec_rts(DisasContext *dc)
{
unsigned int b_bit, i_bit, e_bit;
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
i_bit = dc->ir & (1 << 21);
b_bit = dc->ir & (1 << 22);
/* Insns connected to FSL or AXI stream attached devices. */
static void dec_stream(DisasContext *dc)
{
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
TCGv_i32 t_id, t_ctrl;
int ctrl;
#define MMU_MODE1_SUFFIX _super
#define MMU_MODE2_SUFFIX _user
#define MMU_USER_IDX 2
-static inline int cpu_mmu_index (CPUMIPSState *env)
+static inline int cpu_mmu_index (CPUMIPSState *env, bool ifetch)
{
return env->hflags & MIPS_HFLAG_KSU;
}
#if !defined(CONFIG_USER_ONLY)
#define MEMOP_IDX(DF) \
TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \
- cpu_mmu_index(env));
+ cpu_mmu_index(env, false));
#else
#define MEMOP_IDX(DF)
#endif
target_ulong addr) \
{ \
wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
- int mmu_idx = cpu_mmu_index(env); \
+ int mmu_idx = cpu_mmu_index(env, false); \
int i; \
MEMOP_IDX(DF) \
ensure_writable_pages(env, addr, mmu_idx, GETRA()); \
#define cpu_gen_code cpu_moxie_gen_code
#define cpu_signal_handler cpu_moxie_signal_handler
-static inline int cpu_mmu_index(CPUMoxieState *env)
+static inline int cpu_mmu_index(CPUMoxieState *env, bool ifetch)
{
return 0;
}
*flags = (env->flags & D_FLAG);
}
-static inline int cpu_mmu_index(CPUOpenRISCState *env)
+static inline int cpu_mmu_index(CPUOpenRISCState *env, bool ifetch)
{
if (!(env->sr & SR_IME)) {
return MMU_NOMMU_IDX;
dc->ppc = pc_start;
dc->pc = pc_start;
dc->flags = cpu->env.cpucfgr;
- dc->mem_idx = cpu_mmu_index(&cpu->env);
+ dc->mem_idx = cpu_mmu_index(&cpu->env, false);
dc->synced_flags = dc->tb_flags = tb->flags;
dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
dc->singlestep_enabled = cs->singlestep_enabled;
#define MMU_MODE1_SUFFIX _kernel
#define MMU_MODE2_SUFFIX _hypv
#define MMU_USER_IDX 0
-static inline int cpu_mmu_index (CPUPPCState *env)
+static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
{
return env->mmu_idx;
}
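As in every other target converted by this patch, the new argument is
ignored here; the series only puts the parameter in place. A target
that keeps separate instruction- and data-side translation state could
then honour it along these lines (a hypothetical sketch; the immu_idx
and dmmu_idx fields are assumptions, not part of this patch):

    /* Hypothetical follow-up: split I-side and D-side MMU indexes. */
    static inline int cpu_mmu_index(CPUPPCState *env, bool ifetch)
    {
        return ifetch ? env->immu_idx : env->dmmu_idx;
    }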
#define MMU_SECONDARY_IDX 1
#define MMU_HOME_IDX 2
-static inline int cpu_mmu_index (CPUS390XState *env)
+static inline int cpu_mmu_index (CPUS390XState *env, bool ifetch)
{
switch (env->psw.mask & PSW_MASK_ASC) {
case PSW_ASC_PRIMARY:
static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
uint32_t l)
{
- int mmu_idx = cpu_mmu_index(env);
+ int mmu_idx = cpu_mmu_index(env, false);
while (l > 0) {
void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
uint32_t l)
{
- int mmu_idx = cpu_mmu_index(env);
+ int mmu_idx = cpu_mmu_index(env, false);
while (l > 0) {
void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUSH4State *env)
+static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch)
{
return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
}
}
#endif
-static inline int cpu_mmu_index(CPUSPARCState *env1)
+static inline int cpu_mmu_index(CPUSPARCState *env1, bool ifetch)
{
#if defined(CONFIG_USER_ONLY)
return MMU_USER_IDX;
SPARCCPU *cpu = SPARC_CPU(cs);
CPUSPARCState *env = &cpu->env;
hwaddr phys_addr;
- int mmu_idx = cpu_mmu_index(env);
+ int mmu_idx = cpu_mmu_index(env, false);
MemoryRegionSection section;
if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
last_pc = dc->pc;
dc->npc = (target_ulong) tb->cs_base;
dc->cc_op = CC_OP_DYNAMIC;
- dc->mem_idx = cpu_mmu_index(env);
+ dc->mem_idx = cpu_mmu_index(env, false);
dc->def = env->def;
dc->fpu_enabled = tb_fpu_enabled(tb->flags);
dc->address_mask_32bit = tb_am_enabled(tb->flags);
#define cpu_signal_handler cpu_tricore_signal_handler
#define cpu_list tricore_cpu_list
-static inline int cpu_mmu_index(CPUTriCoreState *env)
+static inline int cpu_mmu_index(CPUTriCoreState *env, bool ifetch)
{
return 0;
}
ctx.tb = tb;
ctx.singlestep_enabled = cs->singlestep_enabled;
ctx.bstate = BS_NONE;
- ctx.mem_idx = cpu_mmu_index(env);
+ ctx.mem_idx = cpu_mmu_index(env, false);
tcg_clear_temp_count();
gen_tb_start(tb);
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
-static inline int cpu_mmu_index(CPUUniCore32State *env)
+static inline int cpu_mmu_index(CPUUniCore32State *env, bool ifetch)
{
return (env->uncached_asr & ASR_M) == ASR_MODE_USER ? 1 : 0;
}
#define MMU_MODE2_SUFFIX _ring2
#define MMU_MODE3_SUFFIX _ring3
-static inline int cpu_mmu_index(CPUXtensaState *env)
+static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)
{
return xtensa_get_cring(env);
}
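Across all of these targets the calling pattern is the same: compute
the index once with the appropriate ifetch value, then hand it to the
softmmu helpers. For instance, pairing it with the tlb_vaddr_to_host()
probe seen in the s390x hunks above (an illustrative sketch, not part
of the patch):

    /* Illustrative: probe for a direct host mapping of a data access. */
    int mmu_idx = cpu_mmu_index(env, false);
    void *p = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
    if (p) {
        /* TLB hit: access the guest page directly through p. */
    }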