/*
* defines common to all virtual CPUs
- *
+ *
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef CPU_ALL_H
#define CPU_ALL_H
-#if defined(__arm__) || defined(__sparc__)
-#define WORDS_ALIGNED
-#endif
+#include "qemu-common.h"
+#include "cpu-common.h"
-/* some important defines:
- *
+/* some important defines:
+ *
* WORDS_ALIGNED : if defined, the host cpu can only make word aligned
* memory accesses.
- *
+ *
* WORDS_BIGENDIAN : if defined, the host cpu is big endian and
* otherwise little endian.
- *
+ *
* (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
- *
+ *
* TARGET_WORDS_BIGENDIAN : same for target cpu
*/
-#include "bswap.h"
+#include "softfloat.h"
#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#define bswaptls(s) bswap64s(s)
#endif
+typedef union {
+ float32 f;
+ uint32_t l;
+} CPU_FloatU;
+
/* NOTE: the ARM FPA format is awkward: the two 32-bit words of a double
   are stored in big-endian order! */
typedef union {
uint64_t ll;
} CPU_DoubleU;
+#ifdef TARGET_SPARC
+typedef union {
+ float128 q;
+#if defined(WORDS_BIGENDIAN) \
+ || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
+ struct {
+ uint32_t upmost;
+ uint32_t upper;
+ uint32_t lower;
+ uint32_t lowest;
+ } l;
+ struct {
+ uint64_t upper;
+ uint64_t lower;
+ } ll;
+#else
+ struct {
+ uint32_t lowest;
+ uint32_t lower;
+ uint32_t upper;
+ uint32_t upmost;
+ } l;
+ struct {
+ uint64_t lower;
+ uint64_t upper;
+ } ll;
+#endif
+} CPU_QuadU;
+#endif
+
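These unions give a well-defined way to pun between softfloat values and their raw bit patterns, instead of casting pointers. A minimal standalone sketch of the idea, using plain IEEE float as a stand-in for the softfloat float32 type (an assumption for the demo):

#include <stdint.h>
#include <stdio.h>

typedef float float32;               /* assumption: stand-in for softfloat.h */

typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

int main(void)
{
    CPU_FloatU u;
    u.f = 1.0f;
    /* prints 0x3f800000 on IEEE hosts */
    printf("raw bits of 1.0f: 0x%08x\n", (unsigned)u.l);
    return 0;
}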
/* CPU memory access without any memory or io remapping */
/*
* type is:
* (empty): integer access
* f : float access
- *
+ *
* sign is:
* (empty): for floats or 32 bit size
* u : unsigned
* w: 16 bits
* l: 32 bits
* q: 64 bits
- *
+ *
* endian is:
* (empty): target cpu endianness or 8 bit access
* r : reversed target cpu endianness (not implemented yet)
* user : user mode access using soft MMU
* kernel : kernel mode access using soft MMU
*/
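Put together, the grammar above yields names such as lduw_be_p (load unsigned 16-bit, big endian, from a host pointer) or stfq_le_p (store 64-bit float, little endian). A minimal standalone sketch of the little-endian 32-bit pair, mirroring the conservative byte-wise path defined below:

#include <stdint.h>
#include <assert.h>

/* Byte-wise load/store: correct on any host endianness or alignment,
   the same approach as the WORDS_ALIGNED fallback in this header. */
static int ldl_le_demo(const void *ptr)
{
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
}

static void stl_le_demo(void *ptr, int v)
{
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
}

int main(void)
{
    uint8_t buf[4];
    stl_le_demo(buf, 0x12345678);
    assert(buf[0] == 0x78 && buf[3] == 0x12);   /* little-endian layout */
    assert(ldl_le_demo(buf) == 0x12345678);     /* round-trips on any host */
    return 0;
}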
-static inline int ldub_p(void *ptr)
+static inline int ldub_p(const void *ptr)
{
return *(uint8_t *)ptr;
}
-static inline int ldsb_p(void *ptr)
+static inline int ldsb_p(const void *ptr)
{
return *(int8_t *)ptr;
}
#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
/* conservative code for little endian unaligned accesses */
-static inline int lduw_le_p(void *ptr)
+static inline int lduw_le_p(const void *ptr)
{
-#ifdef __powerpc__
+#ifdef _ARCH_PPC
int val;
__asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
return val;
#else
- uint8_t *p = ptr;
+ const uint8_t *p = ptr;
return p[0] | (p[1] << 8);
#endif
}
-static inline int ldsw_le_p(void *ptr)
+static inline int ldsw_le_p(const void *ptr)
{
-#ifdef __powerpc__
+#ifdef _ARCH_PPC
int val;
__asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
return (int16_t)val;
#else
- uint8_t *p = ptr;
+ const uint8_t *p = ptr;
return (int16_t)(p[0] | (p[1] << 8));
#endif
}
-static inline int ldl_le_p(void *ptr)
+static inline int ldl_le_p(const void *ptr)
{
-#ifdef __powerpc__
+#ifdef _ARCH_PPC
int val;
__asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
return val;
#else
- uint8_t *p = ptr;
+ const uint8_t *p = ptr;
return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}
-static inline uint64_t ldq_le_p(void *ptr)
+static inline uint64_t ldq_le_p(const void *ptr)
{
- uint8_t *p = ptr;
+ const uint8_t *p = ptr;
uint32_t v1, v2;
- v1 = ldl_p(p);
- v2 = ldl_p(p + 4);
+ v1 = ldl_le_p(p);
+ v2 = ldl_le_p(p + 4);
return v1 | ((uint64_t)v2 << 32);
}
static inline void stw_le_p(void *ptr, int v)
{
-#ifdef __powerpc__
+#ifdef _ARCH_PPC
__asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
uint8_t *p = ptr;
static inline void stl_le_p(void *ptr, int v)
{
-#ifdef __powerpc__
+#ifdef _ARCH_PPC
__asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
uint8_t *p = ptr;
static inline void stq_le_p(void *ptr, uint64_t v)
{
uint8_t *p = ptr;
- stl_p(p, (uint32_t)v);
- stl_p(p + 4, v >> 32);
+ stl_le_p(p, (uint32_t)v);
+ stl_le_p(p + 4, v >> 32);
}
/* float access */
-static inline float32 ldfl_le_p(void *ptr)
+static inline float32 ldfl_le_p(const void *ptr)
{
union {
float32 f;
stl_le_p(ptr, u.i);
}
-static inline float64 ldfq_le_p(void *ptr)
+static inline float64 ldfq_le_p(const void *ptr)
{
CPU_DoubleU u;
u.l.lower = ldl_le_p(ptr);
#else
-static inline int lduw_le_p(void *ptr)
+static inline int lduw_le_p(const void *ptr)
{
return *(uint16_t *)ptr;
}
-static inline int ldsw_le_p(void *ptr)
+static inline int ldsw_le_p(const void *ptr)
{
return *(int16_t *)ptr;
}
-static inline int ldl_le_p(void *ptr)
+static inline int ldl_le_p(const void *ptr)
{
return *(uint32_t *)ptr;
}
-static inline uint64_t ldq_le_p(void *ptr)
+static inline uint64_t ldq_le_p(const void *ptr)
{
return *(uint64_t *)ptr;
}
/* float access */
-static inline float32 ldfl_le_p(void *ptr)
+static inline float32 ldfl_le_p(const void *ptr)
{
return *(float32 *)ptr;
}
-static inline float64 ldfq_le_p(void *ptr)
+static inline float64 ldfq_le_p(const void *ptr)
{
return *(float64 *)ptr;
}
#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
-static inline int lduw_be_p(void *ptr)
+static inline int lduw_be_p(const void *ptr)
{
#if defined(__i386__)
int val;
: "m" (*(uint16_t *)ptr));
return val;
#else
- uint8_t *b = (uint8_t *) ptr;
+ const uint8_t *b = ptr;
return ((b[0] << 8) | b[1]);
#endif
}
-static inline int ldsw_be_p(void *ptr)
+static inline int ldsw_be_p(const void *ptr)
{
#if defined(__i386__)
int val;
: "m" (*(uint16_t *)ptr));
return (int16_t)val;
#else
- uint8_t *b = (uint8_t *) ptr;
+ const uint8_t *b = ptr;
return (int16_t)((b[0] << 8) | b[1]);
#endif
}
-static inline int ldl_be_p(void *ptr)
+static inline int ldl_be_p(const void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
int val;
: "m" (*(uint32_t *)ptr));
return val;
#else
- uint8_t *b = (uint8_t *) ptr;
+ const uint8_t *b = ptr;
return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}
-static inline uint64_t ldq_be_p(void *ptr)
+static inline uint64_t ldq_be_p(const void *ptr)
{
uint32_t a,b;
a = ldl_be_p(ptr);
- b = ldl_be_p(ptr+4);
+ b = ldl_be_p((uint8_t *)ptr + 4);
return (((uint64_t)a<<32)|b);
}
static inline void stq_be_p(void *ptr, uint64_t v)
{
stl_be_p(ptr, v >> 32);
- stl_be_p(ptr + 4, v);
+ stl_be_p((uint8_t *)ptr + 4, v);
}
/* float access */
-static inline float32 ldfl_be_p(void *ptr)
+static inline float32 ldfl_be_p(const void *ptr)
{
union {
float32 f;
stl_be_p(ptr, u.i);
}
-static inline float64 ldfq_be_p(void *ptr)
+static inline float64 ldfq_be_p(const void *ptr)
{
CPU_DoubleU u;
u.l.upper = ldl_be_p(ptr);
- u.l.lower = ldl_be_p(ptr + 4);
+ u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
return u.d;
}
CPU_DoubleU u;
u.d = v;
stl_be_p(ptr, u.l.upper);
- stl_be_p(ptr + 4, u.l.lower);
+ stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}
#else
-static inline int lduw_be_p(void *ptr)
+static inline int lduw_be_p(const void *ptr)
{
return *(uint16_t *)ptr;
}
-static inline int ldsw_be_p(void *ptr)
+static inline int ldsw_be_p(const void *ptr)
{
return *(int16_t *)ptr;
}
-static inline int ldl_be_p(void *ptr)
+static inline int ldl_be_p(const void *ptr)
{
return *(uint32_t *)ptr;
}
-static inline uint64_t ldq_be_p(void *ptr)
+static inline uint64_t ldq_be_p(const void *ptr)
{
return *(uint64_t *)ptr;
}
/* float access */
-static inline float32 ldfl_be_p(void *ptr)
+static inline float32 ldfl_be_p(const void *ptr)
{
return *(float32 *)ptr;
}
-static inline float64 ldfq_be_p(void *ptr)
+static inline float64 ldfq_be_p(const void *ptr)
{
return *(float64 *)ptr;
}
/* MMU memory access macros */
+#if defined(CONFIG_USER_ONLY)
+#include <assert.h>
+#include "qemu-types.h"
+
+/* On some host systems the guest address space is reserved on the host.
+ * This allows the guest address space to be offset to a convenient location.
+ */
+//#define GUEST_BASE 0x20000000
+#define GUEST_BASE 0
+
+/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
+#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
+#define h2g(x) ({ \
+ unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
+ /* Check if given address fits target address space */ \
+ assert(__ret == (abi_ulong)__ret); \
+ (abi_ulong)__ret; \
+})
+#define h2g_valid(x) ({ \
+ unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
+ (__guest == (abi_ulong)__guest); \
+})
+
+#define saddr(x) g2h(x)
+#define laddr(x) g2h(x)
+
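A small standalone sketch of the intended round trip: validate a host pointer with the h2g_valid check before narrowing it to a guest address, then widen back with g2h. The 32-bit abi_ulong is an assumption for the demo, and the macros are re-derived here in portable C (the originals above use GCC statement expressions):

#include <assert.h>
#include <stdint.h>

typedef uint32_t abi_ulong;          /* assumption: 32-bit guest addresses */
#define DEMO_GUEST_BASE 0UL

#define demo_g2h(x) ((void *)((unsigned long)(x) + DEMO_GUEST_BASE))
#define demo_h2g_valid(x) \
    (((unsigned long)(x) - DEMO_GUEST_BASE) == \
     (abi_ulong)((unsigned long)(x) - DEMO_GUEST_BASE))

int main(void)
{
    static uint8_t ram[4096];        /* stand-in for mapped guest RAM */
    /* On a 64-bit host this check can fail; that is exactly the case
       the assert in h2g() guards against. */
    if (demo_h2g_valid(ram)) {
        abi_ulong gaddr = (abi_ulong)((unsigned long)ram - DEMO_GUEST_BASE);
        assert(demo_g2h(gaddr) == (void *)ram);   /* round trip */
    }
    return 0;
}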
+#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
different sizes */
-#define ldub_raw(p) ldub_p((uint8_t *)(long)(p))
-#define ldsb_raw(p) ldsb_p((uint8_t *)(long)(p))
-#define lduw_raw(p) lduw_p((uint8_t *)(long)(p))
-#define ldsw_raw(p) ldsw_p((uint8_t *)(long)(p))
-#define ldl_raw(p) ldl_p((uint8_t *)(long)(p))
-#define ldq_raw(p) ldq_p((uint8_t *)(long)(p))
-#define ldfl_raw(p) ldfl_p((uint8_t *)(long)(p))
-#define ldfq_raw(p) ldfq_p((uint8_t *)(long)(p))
-#define stb_raw(p, v) stb_p((uint8_t *)(long)(p), v)
-#define stw_raw(p, v) stw_p((uint8_t *)(long)(p), v)
-#define stl_raw(p, v) stl_p((uint8_t *)(long)(p), v)
-#define stq_raw(p, v) stq_p((uint8_t *)(long)(p), v)
-#define stfl_raw(p, v) stfl_p((uint8_t *)(long)(p), v)
-#define stfq_raw(p, v) stfq_p((uint8_t *)(long)(p), v)
-
-
-#if defined(CONFIG_USER_ONLY)
+#define saddr(x) (uint8_t *)(long)(x)
+#define laddr(x) (uint8_t *)(long)(x)
+#endif
+
+#define ldub_raw(p) ldub_p(laddr((p)))
+#define ldsb_raw(p) ldsb_p(laddr((p)))
+#define lduw_raw(p) lduw_p(laddr((p)))
+#define ldsw_raw(p) ldsw_p(laddr((p)))
+#define ldl_raw(p) ldl_p(laddr((p)))
+#define ldq_raw(p) ldq_p(laddr((p)))
+#define ldfl_raw(p) ldfl_p(laddr((p)))
+#define ldfq_raw(p) ldfq_p(laddr((p)))
+#define stb_raw(p, v) stb_p(saddr((p)), v)
+#define stw_raw(p, v) stw_p(saddr((p)), v)
+#define stl_raw(p, v) stl_p(saddr((p)), v)
+#define stq_raw(p, v) stq_p(saddr((p)), v)
+#define stfl_raw(p, v) stfl_p(saddr((p)), v)
+#define stfq_raw(p, v) stfq_p(saddr((p)), v)
+
+
+#if defined(CONFIG_USER_ONLY)
/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
+#define ldq_code(p) ldq_raw(p)
#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
+#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
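The alignment arithmetic rounds an address up to the next page boundary. A quick standalone check of that arithmetic, assuming 4 KiB target pages:

#include <assert.h>

#define DEMO_PAGE_SIZE 4096u
#define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))
#define DEMO_PAGE_ALIGN(a) (((a) + DEMO_PAGE_SIZE - 1) & DEMO_PAGE_MASK)

int main(void)
{
    assert(DEMO_PAGE_ALIGN(0x1234u) == 0x2000u);  /* rounds up */
    assert(DEMO_PAGE_ALIGN(0x2000u) == 0x2000u);  /* boundary is unchanged */
    return 0;
}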
+/* ??? These should be the larger of unsigned long and target_ulong. */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
#define PAGE_VALID 0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
-#define PAGE_WRITE_ORG 0x0010
+#define PAGE_WRITE_ORG 0x0010
+#define PAGE_RESERVED 0x0020
void page_dump(FILE *f);
-int page_get_flags(unsigned long address);
-void page_set_flags(unsigned long start, unsigned long end, int flags);
-void page_unprotect_range(uint8_t *data, unsigned long data_size);
-
-#define SINGLE_CPU_DEFINES
-#ifdef SINGLE_CPU_DEFINES
-
-#if defined(TARGET_I386)
-
-#define CPUState CPUX86State
-#define cpu_init cpu_x86_init
-#define cpu_exec cpu_x86_exec
-#define cpu_gen_code cpu_x86_gen_code
-#define cpu_signal_handler cpu_x86_signal_handler
-
-#elif defined(TARGET_ARM)
-
-#define CPUState CPUARMState
-#define cpu_init cpu_arm_init
-#define cpu_exec cpu_arm_exec
-#define cpu_gen_code cpu_arm_gen_code
-#define cpu_signal_handler cpu_arm_signal_handler
-
-#elif defined(TARGET_SPARC)
-
-#define CPUState CPUSPARCState
-#define cpu_init cpu_sparc_init
-#define cpu_exec cpu_sparc_exec
-#define cpu_gen_code cpu_sparc_gen_code
-#define cpu_signal_handler cpu_sparc_signal_handler
-
-#elif defined(TARGET_PPC)
+int page_get_flags(target_ulong address);
+void page_set_flags(target_ulong start, target_ulong end, int flags);
+int page_check_range(target_ulong start, target_ulong len, int flags);
-#define CPUState CPUPPCState
-#define cpu_init cpu_ppc_init
-#define cpu_exec cpu_ppc_exec
-#define cpu_gen_code cpu_ppc_gen_code
-#define cpu_signal_handler cpu_ppc_signal_handler
+void cpu_exec_init_all(unsigned long tb_size);
+CPUState *cpu_copy(CPUState *env);
-#elif defined(TARGET_MIPS)
-#define CPUState CPUMIPSState
-#define cpu_init cpu_mips_init
-#define cpu_exec cpu_mips_exec
-#define cpu_gen_code cpu_mips_gen_code
-#define cpu_signal_handler cpu_mips_signal_handler
-
-#else
-
-#error unsupported target CPU
-
-#endif
-
-#endif /* SINGLE_CPU_DEFINES */
-
-void cpu_dump_state(CPUState *env, FILE *f,
+void cpu_dump_state(CPUState *env, FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
int flags);
+void cpu_dump_statistics(CPUState *env, FILE *f,
+                         int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
+                         int flags);
-void cpu_abort(CPUState *env, const char *fmt, ...);
+void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
+ __attribute__ ((__format__ (__printf__, 2, 3)));
+extern CPUState *first_cpu;
extern CPUState *cpu_single_env;
-extern int code_copy_enabled;
+extern int64_t qemu_icount;
+extern int use_icount;
-#define CPU_INTERRUPT_EXIT 0x01 /* wants exit from main loop */
#define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER 0x08 /* internal timer exception pending */
+#define CPU_INTERRUPT_FIQ 0x10 /* Fast interrupt pending. */
+#define CPU_INTERRUPT_HALT 0x20 /* CPU halt wanted */
+#define CPU_INTERRUPT_SMI 0x40 /* (x86 only) SMI interrupt pending */
+#define CPU_INTERRUPT_DEBUG 0x80 /* Debug event occurred. */
+#define CPU_INTERRUPT_VIRQ 0x100 /* virtual interrupt pending. */
+#define CPU_INTERRUPT_NMI 0x200 /* NMI pending. */
+
void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);
-int cpu_breakpoint_insert(CPUState *env, target_ulong pc);
-int cpu_breakpoint_remove(CPUState *env, target_ulong pc);
+void cpu_exit(CPUState *s);
+
+int qemu_cpu_has_work(CPUState *env);
+
+/* Breakpoint/watchpoint flags */
+#define BP_MEM_READ 0x01
+#define BP_MEM_WRITE 0x02
+#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
+#define BP_STOP_BEFORE_ACCESS 0x04
+#define BP_WATCHPOINT_HIT 0x08
+#define BP_GDB 0x10
+#define BP_CPU 0x20
+
+int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
+ CPUBreakpoint **breakpoint);
+int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
+void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
+void cpu_breakpoint_remove_all(CPUState *env, int mask);
+int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
+ int flags, CPUWatchpoint **watchpoint);
+int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
+ target_ulong len, int flags);
+void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
+void cpu_watchpoint_remove_all(CPUState *env, int mask);
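A hedged sketch of driving this API from, say, a debugger stub: insert with BP_GDB, keep the returned handle, and later remove either by reference or by PC and flags. The 0-on-success return convention assumed below is not stated by the declarations themselves:

/* Sketch only: set a debugger breakpoint at 'pc', then clear it.
   Assumes cpu_breakpoint_insert() returns 0 on success. */
static int demo_toggle_breakpoint(CPUState *env, target_ulong pc)
{
    CPUBreakpoint *bp;
    if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) < 0)
        return -1;                   /* insertion failed */
    /* ... run, handle the hit ... */
    cpu_breakpoint_remove_by_ref(env, bp);
    /* equivalently: cpu_breakpoint_remove(env, pc, BP_GDB); */
    return 0;
}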
+
+#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
+#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
+#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */
+
void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);
/* Return the physical page corresponding to a virtual one. Use it
only for debugging because no protection checks are done. Return -1
if no page found. */
-target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr);
+target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);
-#define CPU_LOG_TB_OUT_ASM (1 << 0)
+#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM (1 << 1)
#define CPU_LOG_TB_OP (1 << 2)
#define CPU_LOG_TB_OP_OPT (1 << 3)
#define CPU_LOG_PCALL (1 << 6)
#define CPU_LOG_IOPORT (1 << 7)
#define CPU_LOG_TB_CPU (1 << 8)
+#define CPU_LOG_RESET (1 << 9)
/* define log items */
typedef struct CPULogItem {
const char *help;
} CPULogItem;
-extern CPULogItem cpu_log_items[];
+extern const CPULogItem cpu_log_items[];
void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
/* memory API */
-extern int phys_ram_size;
extern int phys_ram_fd;
-extern uint8_t *phys_ram_base;
extern uint8_t *phys_ram_dirty;
+extern ram_addr_t ram_size;
+extern ram_addr_t last_ram_offset;
/* physical memory access */
-#define IO_MEM_NB_ENTRIES 256
+
+/* MMIO pages are identified by a combination of an IO device index and
+   3 flags. The ROMD code stores the page ram offset in the iotlb entry,
+   so only a limited number of ids are available. */
+
+#define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))
+
+/* Flags stored in the low bits of the TLB virtual address. These are
+   defined so that the fast-path RAM access case is all zeros. */
+/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK (1 << 3)
-#define IO_MEM_SHIFT 4
-
-#define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */
-#define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */
-#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
-#define IO_MEM_NOTDIRTY (4 << IO_MEM_SHIFT) /* used internally, never use directly */
-
-typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
-typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
-
-void cpu_register_physical_memory(target_phys_addr_t start_addr,
- unsigned long size,
- unsigned long phys_offset);
-int cpu_register_io_memory(int io_index,
- CPUReadMemoryFunc **mem_read,
- CPUWriteMemoryFunc **mem_write,
- void *opaque);
-CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
-CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);
-
-void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
- int len, int is_write);
-static inline void cpu_physical_memory_read(target_phys_addr_t addr,
- uint8_t *buf, int len)
-{
- cpu_physical_memory_rw(addr, buf, len, 0);
-}
-static inline void cpu_physical_memory_write(target_phys_addr_t addr,
- const uint8_t *buf, int len)
-{
- cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
-}
-uint32_t ldub_phys(target_phys_addr_t addr);
-uint32_t lduw_phys(target_phys_addr_t addr);
-uint32_t ldl_phys(target_phys_addr_t addr);
-uint64_t ldq_phys(target_phys_addr_t addr);
-void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
-void stb_phys(target_phys_addr_t addr, uint32_t val);
-void stw_phys(target_phys_addr_t addr, uint32_t val);
-void stl_phys(target_phys_addr_t addr, uint32_t val);
-void stq_phys(target_phys_addr_t addr, uint64_t val);
-
-int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
+/* Set if TLB entry references a clean RAM page. The iotlb entry will
+ contain the page physical address. */
+#define TLB_NOTDIRTY (1 << 4)
+/* Set if TLB entry is an IO callback. */
+#define TLB_MMIO (1 << 5)
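A sketch of the fast-path test these flag bits enable: a TLB entry refers to plain RAM, and is safe for a direct host load/store, exactly when all of the low flag bits are clear:

/* Sketch: with all flags in the low bits, one mask test separates the
   direct-RAM fast path from the invalid, not-dirty and MMIO slow paths. */
#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO)

static inline int tlb_is_fast_ram(target_ulong tlb_addr)
{
    return (tlb_addr & TLB_FLAGS_MASK) == 0;
}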
+
+int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
uint8_t *buf, int len, int is_write);
-#define VGA_DIRTY_FLAG 0x01
-#define CODE_DIRTY_FLAG 0x02
+#define VGA_DIRTY_FLAG 0x01
+#define CODE_DIRTY_FLAG 0x02
+#define KQEMU_DIRTY_FLAG 0x04
+#define MIGRATION_DIRTY_FLAG 0x08
/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}
-static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
+static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
int dirty_flags)
{
return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);
+int cpu_physical_memory_set_dirty_tracking(int enable);
+
+int cpu_physical_memory_get_dirty_tracking(void);
+
+int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
+ target_phys_addr_t end_addr);
+
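A sketch of how a consumer such as live migration might walk the dirty bitmap; the loop structure and page handling are assumptions, while the flag and accessors come from the definitions above:

/* Sketch: visit every page in [start, start + len) dirtied since the
   last pass, using the MIGRATION_DIRTY_FLAG bit defined above. */
static void demo_scan_dirty(ram_addr_t start, ram_addr_t len)
{
    ram_addr_t addr;
    for (addr = start; addr < start + len; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
            /* ... send or copy the page, then clear its dirty bit ... */
        }
    }
}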
void dump_exec_info(FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
+/* Coalesced MMIO regions are areas where write operations can be reordered.
+ * This usually implies that the write operations are side-effect free. This
+ * allows batching, which can have a major impact on performance when using
+ * virtualization.
+ */
+void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
+
+void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
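Usage is symmetric: a device model whose writes really are side-effect free registers its region when it is mapped and unregisters it when unmapped. The device, address, and size below are illustrative only:

/* Sketch: a hypothetical framebuffer-like device opting in to
   coalescing for its (made-up) 4 KiB MMIO window. */
static void demo_fb_map(target_phys_addr_t base)
{
    qemu_register_coalesced_mmio(base, 0x1000);
}

static void demo_fb_unmap(target_phys_addr_t base)
{
    qemu_unregister_coalesced_mmio(base, 0x1000);
}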
+
+/*******************************************/
+/* host CPU ticks (if available) */
+
+#if defined(_ARCH_PPC)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+ int64_t retval;
+#ifdef _ARCH_PPC64
+    /* This reads the timebase in one 64-bit go and includes the Cell
+       workaround from:
+       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
+    */
+ __asm__ __volatile__ (
+ "mftb %0\n\t"
+ "cmpwi %0,0\n\t"
+ "beq- $-8"
+ : "=r" (retval));
+#else
+ /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
+ unsigned long junk;
+ __asm__ __volatile__ (
+ "mftbu %1\n\t"
+ "mftb %L0\n\t"
+ "mftbu %0\n\t"
+ "cmpw %0,%1\n\t"
+ "bne $-16"
+ : "=r" (retval), "=r" (junk));
+#endif
+ return retval;
+}
+
+#elif defined(__i386__)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+ int64_t val;
+ asm volatile ("rdtsc" : "=A" (val));
+ return val;
+}
+
+#elif defined(__x86_64__)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+ uint32_t low,high;
+ int64_t val;
+ asm volatile("rdtsc" : "=a" (low), "=d" (high));
+ val = high;
+ val <<= 32;
+ val |= low;
+ return val;
+}
+
+#elif defined(__hppa__)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+ int val;
+ asm volatile ("mfctl %%cr16, %0" : "=r"(val));
+ return val;
+}
+
+#elif defined(__ia64)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+ int64_t val;
+ asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
+ return val;
+}
+
+#elif defined(__s390__)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+ int64_t val;
+ asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
+ return val;
+}
+
+#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)
+
+static inline int64_t cpu_get_real_ticks (void)
+{
+#if defined(_LP64)
+ uint64_t rval;
+ asm volatile("rd %%tick,%0" : "=r"(rval));
+ return rval;
+#else
+ union {
+ uint64_t i64;
+ struct {
+ uint32_t high;
+ uint32_t low;
+ } i32;
+ } rval;
+ asm volatile("rd %%tick,%1; srlx %1,32,%0"
+ : "=r"(rval.i32.high), "=r"(rval.i32.low));
+ return rval.i64;
+#endif
+}
+
+#elif defined(__mips__)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+#if __mips_isa_rev >= 2
+ uint32_t count;
+ static uint32_t cyc_per_count = 0;
+
+ if (!cyc_per_count)
+ __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));
+
+ __asm__ __volatile__("rdhwr %1, $2" : "=r" (count));
+ return (int64_t)(count * cyc_per_count);
+#else
+ /* FIXME */
+ static int64_t ticks = 0;
+ return ticks++;
+#endif
+}
+
+#else
+/* The host CPU doesn't have an easily accessible cycle counter.
+ Just return a monotonically increasing value. This will be
+ totally wrong, but hopefully better than nothing. */
+static inline int64_t cpu_get_real_ticks (void)
+{
+ static int64_t ticks = 0;
+ return ticks++;
+}
+#endif
+
+/* profiling */
+#ifdef CONFIG_PROFILER
+static inline int64_t profile_getclock(void)
+{
+ return cpu_get_real_ticks();
+}
+
+extern int64_t kqemu_time, kqemu_time_start;
+extern int64_t qemu_time, qemu_time_start;
+extern int64_t tlb_flush_time;
+extern int64_t kqemu_exec_count;
+extern int64_t dev_time;
+extern int64_t kqemu_ret_int_count;
+extern int64_t kqemu_ret_excp_count;
+extern int64_t kqemu_ret_intr_count;
+#endif
+
#endif /* CPU_ALL_H */