return false;
}
-static TranslationBlock *tb_htable_lookup(CPUState *cpu,
-                                          target_ulong pc,
-                                          target_ulong cs_base,
-                                          uint32_t flags)
+TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
+                                   target_ulong cs_base, uint32_t flags)
{
tb_page_addr_t phys_pc;
struct tb_desc desc;
void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
+TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
+                                   target_ulong cs_base, uint32_t flags);
#if defined(USE_DIRECT_JUMP)
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
+#include "exec/tb-hash.h"
+#include "disas/disas.h"
+#include "exec/log.h"
/* 32-bit helpers */
return ctpop64(arg);
}
+void *HELPER(lookup_tb_ptr)(CPUArchState *env, target_ulong addr)
+{
+    CPUState *cpu = ENV_GET_CPU(env);
+    TranslationBlock *tb;
+    target_ulong cs_base, pc;
+    uint32_t flags;
+
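+    /* Fast path: look for a hit in this vCPU's tb_jmp_cache first. */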
+    tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(addr)]);
+    if (likely(tb)) {
+        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+        if (likely(tb->pc == addr && tb->cs_base == cs_base &&
+                   tb->flags == flags)) {
+            goto found;
+        }
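+        /* Stale cache entry; fall back to the global TB hash table. */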
+        tb = tb_htable_lookup(cpu, addr, cs_base, flags);
+        if (likely(tb)) {
+            atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(addr)], tb);
+            goto found;
+        }
+    }
+    return tcg_ctx.code_gen_epilogue;
+ found:
+    qemu_log_mask_and_addr(CPU_LOG_EXEC, addr,
+                           "Chain %p [%d: " TARGET_FMT_lx "] %s\n",
+                           tb->tc_ptr, cpu->cpu_index, addr,
+                           lookup_symbol(addr));
+    return tb->tc_ptr;
+}
+
void HELPER(exit_atomic)(CPUArchState *env)
{
cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC());
instructions. Only indices 0 and 1 are valid and tcg_gen_goto_tb may be issued
at most once with each slot index per TB.
+* lookup_and_goto_ptr tb_addr
+
+Look up a TB address ('tb_addr') and jump to it if valid. If not valid,
+jump to the TCG epilogue to go back to the exec loop.
+
+This operation is optional. If the TCG backend does not implement the
+goto_ptr opcode, emitting this op is equivalent to emitting exit_tb(0).
+
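Front ends do not emit this op directly; they call tcg_gen_lookup_and_goto_ptr()
(declared in tcg/tcg-op.h), typically when translating computed jumps, indirect
calls and returns, where goto_tb cannot be used because the destination is not
known at translation time.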
* qemu_ld_i32/i64 t0, t1, flags, memidx
* qemu_st_i32/i64 t0, t1, flags, memidx
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_extrl_i64_i32 0
#define TCG_TARGET_HAS_extrh_i64_i32 0
+#define TCG_TARGET_HAS_goto_ptr 0
#define TCG_TARGET_HAS_div_i64 1
#define TCG_TARGET_HAS_rem_i64 1
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_div_i32 use_idiv_instructions
#define TCG_TARGET_HAS_rem_i32 0
+#define TCG_TARGET_HAS_goto_ptr 0
enum {
TCG_AREG0 = TCG_REG_R6,
#define TCG_TARGET_HAS_muls2_i32 1
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
+#define TCG_TARGET_HAS_goto_ptr 0
#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_extrl_i64_i32 0
#define TCG_TARGET_HAS_mulsh_i64 0
#define TCG_TARGET_HAS_extrl_i64_i32 0
#define TCG_TARGET_HAS_extrh_i64_i32 0
+#define TCG_TARGET_HAS_goto_ptr 0
#define TCG_TARGET_deposit_i32_valid(ofs, len) ((len) <= 16)
#define TCG_TARGET_deposit_i64_valid(ofs, len) ((len) <= 16)
#define TCG_TARGET_HAS_muluh_i32 1
#define TCG_TARGET_HAS_mulsh_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
+#define TCG_TARGET_HAS_goto_ptr 0
#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_add2_i32 0
#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_muluh_i32 1
#define TCG_TARGET_HAS_mulsh_i32 1
+#define TCG_TARGET_HAS_goto_ptr 0
#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_add2_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_extrl_i64_i32 0
#define TCG_TARGET_HAS_extrh_i64_i32 0
+#define TCG_TARGET_HAS_goto_ptr 0
#define TCG_TARGET_HAS_div2_i64 1
#define TCG_TARGET_HAS_rot_i64 1
#define TCG_TARGET_HAS_muls2_i32 1
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
+#define TCG_TARGET_HAS_goto_ptr 0
#define TCG_TARGET_HAS_extrl_i64_i32 1
#define TCG_TARGET_HAS_extrh_i64_i32 1
tcg_gen_op1i(INDEX_op_goto_tb, idx);
}
+void tcg_gen_lookup_and_goto_ptr(TCGv addr)
+{
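+    /* Only use goto_ptr if the backend has it and -d nochain is not in use. */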
+    if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
+        TCGv_ptr ptr = tcg_temp_new_ptr();
+        gen_helper_lookup_tb_ptr(ptr, tcg_ctx.tcg_env, addr);
+        tcg_gen_op1i(INDEX_op_goto_ptr, GET_TCGV_PTR(ptr));
+        tcg_temp_free_ptr(ptr);
+    } else {
+        tcg_gen_exit_tb(0);
+    }
+}
+
static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
{
/* Trigger the asserts within as early as possible. */
*/
void tcg_gen_goto_tb(unsigned idx);
+/**
+ * tcg_gen_lookup_and_goto_ptr() - look up a TB and jump to it if valid
+ * @addr: Guest address of the target TB
+ *
+ * If the TB is not valid, jump to the epilogue.
+ *
+ * This operation is optional. If the TCG backend does not implement goto_ptr,
+ * this op is equivalent to calling tcg_gen_exit_tb() with 0 as the argument.
+ */
+void tcg_gen_lookup_and_goto_ptr(TCGv addr);
+
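For illustration only (not part of this patch): a guest front end whose program
counter lives in a hypothetical TCGv global named cpu_pc could translate an
indirect branch with the new call instead of ending the TB with tcg_gen_exit_tb(0):

    /* Hypothetical front-end helper; cpu_pc and gen_indirect_branch are illustrative names. */
    static void gen_indirect_branch(TCGv dest)
    {
        tcg_gen_mov_tl(cpu_pc, dest);          /* commit the new guest PC */
        tcg_gen_lookup_and_goto_ptr(cpu_pc);   /* chain to the target TB or exit to the loop */
    }

On a backend without goto_ptr this still works: the call degenerates to exit_tb(0).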
#if TARGET_LONG_BITS == 32
#define tcg_temp_new() tcg_temp_new_i32()
#define tcg_global_reg_new tcg_global_reg_new_i32
TCG_OPF_NOT_PRESENT)
DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_END)
DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_END)
+DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_END | IMPL(TCG_TARGET_HAS_goto_ptr))
DEF(qemu_ld_i32, 1, TLADDR_ARGS, 1,
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF_HELPER_FLAGS_1(ctpop_i32, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_FLAGS_1(ctpop_i64, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_2(lookup_tb_ptr, TCG_CALL_NO_WG_SE, ptr, env, tl)
+
DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env)
#ifdef CONFIG_SOFTMMU
qemu_log_unlock();
}
#endif
+
+    /* Assert that goto_ptr is implemented completely. */
+    if (TCG_TARGET_HAS_goto_ptr) {
+        tcg_debug_assert(s->code_gen_epilogue != NULL);
+    }
}
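For context (also not part of this patch): a backend that later sets
TCG_TARGET_HAS_goto_ptr to 1 must handle INDEX_op_goto_ptr in its tcg_out_op()
and set s->code_gen_epilogue while emitting its prologue; the latter is what the
assert above checks. An i386-flavoured sketch of the two pieces, purely as an
illustration:

    /* In tcg_out_op(): jump to the host address in a0 (a TB's tc_ptr or the epilogue). */
    case INDEX_op_goto_ptr:
        tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
        break;

    /* In tcg_target_qemu_prologue(): return path for goto_ptr. Return 0, as
       exit_tb(0) would, and fall through to the regular epilogue. */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_EAX, 0);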
void tcg_func_start(TCGContext *s)
extension that allows arithmetic on void*. */
int code_gen_max_blocks;
void *code_gen_prologue;
+ void *code_gen_epilogue;
void *code_gen_buffer;
size_t code_gen_buffer_size;
void *code_gen_ptr;
#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
+#define TCG_TARGET_HAS_goto_ptr 0
#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_extrl_i64_i32 0