static TranslationBlock *tb_find_slow(CPUArchState *env,
target_ulong pc,
target_ulong cs_base,
uint64_t flags)
{
+ CPUState *cpu = ENV_GET_CPU(env);
TranslationBlock *tb, **ptb1;
unsigned int h;
tb_page_addr_t phys_pc, phys_page1;
tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
}
/* we add the TB in the virtual pc hash table */
- env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
+ cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
return tb;
}
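For context, tb_jmp_cache is a per-vCPU, direct-mapped cache of recently used TranslationBlocks, indexed by a hash of the guest virtual PC; the slow path above finishes by installing the TB so that the next lookup for the same PC hits. A minimal standalone sketch of the pattern, assuming the same 12-bit cache; the TB/VCPU/hash_pc names and the trivial hash are illustrative, not QEMU's exact code:

#include <stdint.h>

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

typedef struct TB {
    uint64_t pc;                  /* guest virtual PC the block starts at */
} TB;

typedef struct VCPU {
    TB *tb_jmp_cache[TB_JMP_CACHE_SIZE];  /* direct-mapped, one TB per slot */
} VCPU;

static unsigned int hash_pc(uint64_t pc)
{
    return pc & (TB_JMP_CACHE_SIZE - 1);  /* placeholder hash */
}

/* Fast path: a hit only counts if the cached TB really starts at this PC. */
static TB *tb_lookup(VCPU *cpu, uint64_t pc)
{
    TB *tb = cpu->tb_jmp_cache[hash_pc(pc)];
    return (tb && tb->pc == pc) ? tb : NULL;
}

/* Slow path ends by installing the TB, as in the hunk above. */
static void tb_install(VCPU *cpu, TB *tb)
{
    cpu->tb_jmp_cache[hash_pc(tb->pc)] = tb;
}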
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
+ CPUState *cpu = ENV_GET_CPU(env);
TranslationBlock *tb;
target_ulong cs_base, pc;
int flags;
/* we record a subset of the CPU state. It will
always be the same before a given translated block
is executed. */
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
+ tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
tb->flags != flags)) {
tb = tb_find_slow(env, pc, cs_base, flags);
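A stale or aliased cache slot is harmless here: the fast path declares a hit only after re-checking pc, cs_base and flags against the cached TB, and otherwise falls back to tb_find_slow. The guard, sketched as a standalone predicate with a hypothetical TBKey type:

#include <stdint.h>

typedef struct TBKey {
    uint64_t pc;
    uint64_t cs_base;
    int flags;
} TBKey;

/* A cached TB is usable only if every key field matches; any mismatch
 * (stale entry, hash collision) simply routes to the slow path. */
static int tb_key_matches(const TBKey *cached, const TBKey *wanted)
{
    return cached->pc == wanted->pc &&
           cached->cs_base == wanted->cs_base &&
           cached->flags == wanted->flags;
}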
cpu->current_tb = NULL;
memset(env->tlb_table, -1, sizeof(env->tlb_table));
- memset(env->tb_jmp_cache, 0, sizeof(env->tb_jmp_cache));
+ memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
env->tlb_flush_addr = -1;
env->tlb_flush_mask = 0;
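This hunk is the full soft-TLB flush path: once every TLB entry is dropped, every cached TB pointer must go too, since tb_jmp_cache is keyed purely by guest virtual address. The memset to -1 invalidates the TLB because, in simplified terms, a valid compare value is a page-aligned address, which all-ones can never equal. A simplified sketch, assuming 4 KiB pages and ignoring QEMU's per-entry flag bits:

#include <stdint.h>

#define PAGE_MASK (~((uint64_t)4096 - 1))   /* assumed 4 KiB pages */

/* An entry filled with -1 (all ones) can never equal a page-aligned
 * address, so every lookup misses and takes the refill path. */
static int tlb_hit(uint64_t entry_addr, uint64_t vaddr)
{
    return entry_addr == (vaddr & PAGE_MASK);
}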
#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
-#define TB_JMP_CACHE_BITS 12
-#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
-
/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
addresses on the same page. The top bits are the same. This allows
TLB invalidation to quickly clear a subset of the hash table. */
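This comment documents the invariant that tb_flush_jmp_cache (the last hunk below) depends on: all cache slots for one guest page form one contiguous run of TB_JMP_PAGE_SIZE entries, clearable with a single memset. An illustrative version of such a split hash, assuming TARGET_PAGE_BITS = 12; the exact bit mixing in QEMU's tb_jmp_cache_hash_func differs:

#include <stdint.h>

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
#define TB_JMP_PAGE_BITS  (TB_JMP_CACHE_BITS / 2)   /* 6 */
#define TB_JMP_PAGE_SIZE  (1 << TB_JMP_PAGE_BITS)   /* 64 slots per page */
#define TB_JMP_ADDR_MASK  (TB_JMP_PAGE_SIZE - 1)
#define TARGET_PAGE_BITS  12                        /* assumed 4 KiB pages */

/* First slot of the contiguous run used by the page containing pc. */
static unsigned int hash_page(uint64_t pc)
{
    return ((pc >> TARGET_PAGE_BITS) << TB_JMP_PAGE_BITS)
           & (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE);
}

/* Full hash: the top bits select the page's run, the bottom
 * TB_JMP_PAGE_BITS vary with the offset inside the page. */
static unsigned int hash_func(uint64_t pc)
{
    return hash_page(pc) | (pc & TB_JMP_ADDR_MASK);
}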
#define CPU_COMMON \
/* soft mmu support */ \
CPU_COMMON_TLB \
- struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
\
/* from this point: preserved by CPU reset */ \
/* ice debug support */ \
struct KVMState;
struct kvm_run;
+#define TB_JMP_CACHE_BITS 12
+#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
+
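The two constants move verbatim from cpu-defs.h into the CPUState header, since the array they size now lives there. Sizing it out: 1 << 12 = 4096 slots, and at 8 bytes per pointer on an LP64 host that is 32 KiB of jump cache per vCPU. An illustrative compile-time check, valid on LP64 hosts only and not part of the patch:

_Static_assert((1 << 12) * sizeof(void *) == 32 * 1024,
               "4096 slots * 8-byte pointers == 32 KiB per vCPU (LP64 only)");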
/**
* CPUState:
* @cpu_index: CPU index (informative).
void *env_ptr; /* CPUArchState */
struct TranslationBlock *current_tb;
+ struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
struct GDBRegisterState *gdb_regs;
int gdb_num_regs;
int gdb_num_g_regs;
cpu->icount_extra = 0;
cpu->icount_decr.u32 = 0;
cpu->can_do_io = 0;
+ memset(cpu->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
}
static bool cpu_common_has_work(CPUState *cs)
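With the array on CPUState, the architecture-independent reset path can clear it without going through env. The byte count is spelled TB_JMP_CACHE_SIZE * sizeof(void *); for an array member of object pointers that matches what sizeof on the array itself would yield, as this standalone check illustrates (assuming, as on common ABIs, that all object pointers share one size):

#include <assert.h>

struct TB;
struct S { struct TB *tb_jmp_cache[1 << 12]; };

int main(void)
{
    struct S s;
    (void)s;
    assert(sizeof(s.tb_jmp_cache) == (1 << 12) * sizeof(void *));
    return 0;
}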
tcg_ctx.tb_ctx.nb_tbs = 0;
CPU_FOREACH(cpu) {
- CPUArchState *env = cpu->env_ptr;
-
- memset(env->tb_jmp_cache, 0, sizeof(env->tb_jmp_cache));
+ memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}
memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
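tb_flush is the wholesale reset, taken for instance when the code generation buffer fills: it zeroes the TB count, every vCPU's jump cache, and the global physical hash. The two tables answer different questions; roughly as below, where the types and the tb_phys_hash size are assumptions, not taken from the patch:

struct TB;

/* Global, chained, keyed by *physical* PC: "was this code translated?" */
struct TB *tb_phys_hash[1 << 15];   /* size assumed; buckets of TB chains */

/* Per-vCPU, direct-mapped, keyed by *virtual* PC: "what ran here last?" */
struct TB *tb_jmp_cache[1 << 12];   /* one slot per hash, no chaining */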
/* remove the TB from the hash list */
h = tb_jmp_cache_hash_func(tb->pc);
CPU_FOREACH(cpu) {
- CPUArchState *env = cpu->env_ptr;
-
- if (env->tb_jmp_cache[h] == tb) {
- env->tb_jmp_cache[h] = NULL;
+ if (cpu->tb_jmp_cache[h] == tb) {
+ cpu->tb_jmp_cache[h] = NULL;
}
}
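Invalidating a single TB probes exactly one slot, h, on each vCPU, clearing it only if it still holds this TB; stale pointers in other slots are caught later by the fast path's key re-check. As a standalone loop, reusing the hypothetical VCPU/TB types from the first sketch:

#include <stddef.h>

/* Assumes the illustrative VCPU/TB definitions from the earlier sketch. */
static void evict_tb(VCPU *cpus[], size_t n_cpus, TB *tb, unsigned int h)
{
    for (size_t i = 0; i < n_cpus; i++) {
        if (cpus[i]->tb_jmp_cache[h] == tb) {
            cpus[i]->tb_jmp_cache[h] = NULL;
        }
    }
}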
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
+ CPUState *cpu = ENV_GET_CPU(env);
unsigned int i;
/* Discard jump cache entries for any tb which might potentially
overlap the flushed page. */
i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
- memset(&env->tb_jmp_cache[i], 0,
+ memset(&cpu->tb_jmp_cache[i], 0,
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
i = tb_jmp_cache_hash_page(addr);
- memset(&env->tb_jmp_cache[i], 0,
+ memset(&cpu->tb_jmp_cache[i], 0,
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
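Two runs are cleared because a TB may straddle a page boundary: a block that starts near the end of the preceding page can contain code on the page being flushed, yet it hashes from its start PC on that earlier page. A worked example with the illustrative split hash from above, 4 KiB pages assumed: flushing addr = 0x5000 clears the 64-slot runs for page 4 and page 5, i.e. indices 256..319 and 320..383.

#include <stdint.h>
#include <stdio.h>

/* Same illustrative formula as the split-hash sketch above. */
static unsigned int hash_page(uint64_t pc)
{
    return ((pc >> 12) << 6) & ((1u << 12) - (1u << 6));
}

int main(void)
{
    uint64_t addr = 0x5000;                         /* hypothetical flush */
    printf("runs start at %u and %u\n",
           hash_page(addr - 4096), hash_page(addr)); /* 256 and 320 */
    return 0;
}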