typedef struct TranslationBlock TranslationBlock;
/* XXX: make safe guess about sizes */
-#define MAX_OP_PER_INSTR 208
+#define MAX_OP_PER_INSTR 266
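+/* Worst-case number of TCG ops that a single guest instruction may expand
+   to; translators use it as headroom when checking the op buffer. */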
#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
int *gen_code_size_ptr);
-bool cpu_restore_state(CPUArchState *env, uintptr_t searched_pc);
+bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
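+/* cpu_restore_state() recovers the guest CPU state matching a host fault
+   address inside translated code; returns true when searched_pc falls
+   within some TB. */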
void page_size_init(void);
-void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
-void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
-TranslationBlock *tb_gen_code(CPUArchState *env,
+void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
+void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
+TranslationBlock *tb_gen_code(CPUState *cpu,
target_ulong pc, target_ulong cs_base, int flags,
int cflags);
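/* tb_gen_code() translates a fresh TB for the guest context given by
   pc/cs_base/flags; cflags carries the CF_* bits and, under icount, the
   instruction budget of the block. */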
void cpu_exec_init(CPUArchState *env);
-void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
+void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
+void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
/* cputlb.c */
-void tlb_flush_page(CPUArchState *env, target_ulong addr);
-void tlb_flush(CPUArchState *env, int flush_global);
-void tlb_set_page(CPUArchState *env, target_ulong vaddr,
+void tlb_flush_page(CPUState *cpu, target_ulong addr);
+void tlb_flush(CPUState *cpu, int flush_global);
+void tlb_set_page(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, target_ulong size);
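/* tlb_set_page() inserts a softmmu TLB entry mapping the guest page that
   contains vaddr to paddr for MMU mode mmu_idx, with the given protection
   bits and page size. */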
-void tb_invalidate_phys_addr(hwaddr addr);
+void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
#else
-static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
+static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
-static inline void tlb_flush(CPUArchState *env, int flush_global)
+static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}
#endif
#if defined(__arm__) || defined(_ARCH_PPC) \
|| defined(__x86_64__) || defined(__i386__) \
|| defined(__sparc__) || defined(__aarch64__) \
+ || defined(__s390x__) || defined(__mips__) \
|| defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif
-uint16_t cflags; /* compile flags */
+uint32_t cflags; /* compile flags; must be wide enough for CF_NOCACHE */
#define CF_COUNT_MASK 0x7fff
#define CF_LAST_IO 0x8000 /* Last insn may be an IO access. */
+#define CF_NOCACHE 0x10000 /* To be freed after execution */
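+/* The low CF_COUNT_MASK bits of cflags hold the TB's icount instruction
+   budget; CF_LAST_IO and CF_NOCACHE are flag bits above that field. */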
- uint8_t *tc_ptr; /* pointer to the translated code */
+ void *tc_ptr; /* pointer to the translated code */
/* next matching tb for physical address. */
struct TranslationBlock *phys_hash_next;
/* first and second physical page containing code. The lower bit
/* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
-void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
+void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
/* patch the branch destination */
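/* The 32-bit field at jmp_addr is the rel32 operand of a jmp, which is
   relative to the end of the instruction, hence the jmp_addr + 4 bias. */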
- *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
+ stl_le_p((void*)jmp_addr, addr - (jmp_addr + 4));
+ /* no need to flush icache explicitly */
+}
+#elif defined(__s390x__)
+static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
+{
+ /* patch the branch destination */
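+ /* The 4-byte immediate sits two bytes into the branch instruction and
+    s390 encodes the offset in halfwords, hence the jmp_addr - 2 base and
+    the division by 2. */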
+ intptr_t disp = addr - (jmp_addr - 2);
+ stl_be_p((void*)jmp_addr, disp / 2);
/* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
__asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
-#elif defined(__sparc__)
+#elif defined(__sparc__) || defined(__mips__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#if !defined(CONFIG_USER_ONLY)
-void phys_mem_set_alloc(void *(*alloc)(size_t));
+void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align));
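+/* The uint64_t *align argument lets a custom RAM allocator report the
+   alignment of the block it returns; how the reported value is consumed
+   is left to the memory core. */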
-struct MemoryRegion *iotlb_to_region(hwaddr index);
+struct MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index);
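+/* iotlb_to_region() maps the section index recorded in a TLB entry's
+   iotlb field back to the MemoryRegion it refers to, resolved against the
+   given address space. */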
bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
uint64_t *pvalue, unsigned size);
bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
uint64_t value, unsigned size);
-void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr);
-uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-
-#define ACCESS_TYPE (NB_MMU_MODES + 1)
-#define MEMSUFFIX _code
-
-#define DATA_SIZE 1
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 2
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 4
-#include "exec/softmmu_header.h"
-
-#define DATA_SIZE 8
-#include "exec/softmmu_header.h"
-
-#undef ACCESS_TYPE
-#undef MEMSUFFIX
-
#endif
#if defined(CONFIG_USER_ONLY)
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif
-typedef void (CPUDebugExcpHandler)(CPUArchState *env);
-
-void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
-
/* vl.c */
extern int singlestep;
/* cpu-exec.c */
extern volatile sig_atomic_t exit_request;
-/* Deterministic execution requires that IO only be performed on the last
- instruction of a TB so that interrupts take effect immediately. */
-static inline int can_do_io(CPUArchState *env)
+/**
+ * cpu_can_do_io:
+ * @cpu: The CPU for which to check IO.
+ *
+ * Deterministic execution requires that IO only be performed on the last
+ * instruction of a TB so that interrupts take effect immediately.
+ *
+ * Returns: %true if memory-mapped IO is safe, %false otherwise.
+ */
+static inline bool cpu_can_do_io(CPUState *cpu)
{
- CPUState *cpu = ENV_GET_CPU(env);
-
if (!use_icount) {
- return 1;
+ return true;
}
/* If not executing code then assume we are ok. */
if (cpu->current_tb == NULL) {
- return 1;
+ return true;
}
- return env->can_do_io != 0;
+ return cpu->can_do_io != 0;
}
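/* Illustrative sketch of how an icount-aware softmmu IO accessor might use
   cpu_can_do_io(): if IO is not yet safe, force a retranslation so that the
   access becomes the last instruction of its TB.  The helper name and
   signature below are hypothetical and shown only for illustration. */
#if 0
static uint64_t example_io_readl(CPUState *cpu, hwaddr physaddr,
                                 uintptr_t retaddr)
{
    if (!cpu_can_do_io(cpu)) {
        /* Not the final instruction of the current TB: retranslate and
           restart; cpu_io_recompile() does not return. */
        cpu_io_recompile(cpu, retaddr);
    }
    /* ... perform the actual memory-mapped IO read on physaddr ... */
    return 0;
}
#endif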
#endif