X-Git-Url: https://repo.jachan.dev/qemu.git/blobdiff_plain/fc8135a46d095f865d285e697a874f617bfeeb90..c4600d5d417ea13e0f1cc047b227a2b5b0e694f5:/include/exec/cpu-all.h

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 89db792767..da07ce311f 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -31,17 +31,13 @@
 #define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
 #define EXCP_HALTED     0x10003 /* cpu is halted (waiting for external event) */
 #define EXCP_YIELD      0x10004 /* cpu wants to yield timeslice to another */
+#define EXCP_ATOMIC     0x10005 /* stop-the-world and emulate atomic */
 
 /* some important defines:
- *
- * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
- * memory accesses.
  *
  * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
  * otherwise little endian.
  *
- * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
- *
  * TARGET_WORDS_BIGENDIAN : same for target cpu
  */
 
@@ -137,6 +133,8 @@ static inline void tswap64s(uint64_t *s)
 #define stq_p(p, v) stq_be_p(p, v)
 #define stfl_p(p, v) stfl_be_p(p, v)
 #define stfq_p(p, v) stfq_be_p(p, v)
+#define ldn_p(p, sz) ldn_be_p(p, sz)
+#define stn_p(p, sz, v) stn_be_p(p, sz, v)
 #else
 #define lduw_p(p) lduw_le_p(p)
 #define ldsw_p(p) ldsw_le_p(p)
@@ -149,12 +147,13 @@ static inline void tswap64s(uint64_t *s)
 #define stq_p(p, v) stq_le_p(p, v)
 #define stfl_p(p, v) stfl_le_p(p, v)
 #define stfq_p(p, v) stfq_le_p(p, v)
+#define ldn_p(p, sz) ldn_le_p(p, sz)
+#define stn_p(p, sz, v) stn_le_p(p, sz, v)
 #endif
 
 /* MMU memory access macros */
 
 #if defined(CONFIG_USER_ONLY)
-#include <assert.h>
 #include "exec/user/abitypes.h"
 
 /* On some host systems the guest address space is reserved on the host.
@@ -164,21 +163,71 @@ extern unsigned long guest_base;
 extern int have_guest_base;
 extern unsigned long reserved_va;
 
-#define GUEST_ADDR_MAX (reserved_va ? reserved_va : \
+#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
+#define GUEST_ADDR_MAX (~0ul)
+#else
+#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : \
                                     (1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
 #endif
+#else
+
+#include "exec/hwaddr.h"
+
+#define SUFFIX
+#define ARG1         as
+#define ARG1_DECL    AddressSpace *as
+#define TARGET_ENDIANNESS
+#include "exec/memory_ldst.inc.h"
+
+#define SUFFIX       _cached_slow
+#define ARG1         cache
+#define ARG1_DECL    MemoryRegionCache *cache
+#define TARGET_ENDIANNESS
+#include "exec/memory_ldst.inc.h"
+
+static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
+{
+    address_space_stl_notdirty(as, addr, val,
+                               MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+#define SUFFIX
+#define ARG1         as
+#define ARG1_DECL    AddressSpace *as
+#define TARGET_ENDIANNESS
+#include "exec/memory_ldst_phys.inc.h"
+
+/* Inline fast path for direct RAM access.  */
+#define ENDIANNESS
+#include "exec/memory_ldst_cached.inc.h"
+
+#define SUFFIX       _cached
+#define ARG1         cache
+#define ARG1_DECL    MemoryRegionCache *cache
+#define TARGET_ENDIANNESS
+#include "exec/memory_ldst_phys.inc.h"
+#endif
 
 /* page related stuff */
 
+#ifdef TARGET_PAGE_BITS_VARY
+extern bool target_page_bits_decided;
+extern int target_page_bits;
+#define TARGET_PAGE_BITS ({ assert(target_page_bits_decided); \
+                            target_page_bits; })
+#else
+#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
+#endif
+
 #define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
 #define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
 #define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
 
-/* ??? These should be the larger of uintptr_t and target_ulong.  */
-extern uintptr_t qemu_real_host_page_size;
-extern uintptr_t qemu_real_host_page_mask;
+/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
+ * when intptr_t is 32-bit and we are aligning a long long.
+ */
 extern uintptr_t qemu_host_page_size;
-extern uintptr_t qemu_host_page_mask;
+extern intptr_t qemu_host_page_mask;
 
 #define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
 #define REAL_HOST_PAGE_ALIGN(addr) (((addr) + qemu_real_host_page_size - 1) & \
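
The new comment about intptr_t is worth unpacking: HOST_PAGE_ALIGN above
masks with qemu_host_page_mask, and when a 64-bit (long long) address is
aligned on a 32-bit host, an unsigned 32-bit mask would zero-extend and wipe
the upper word.  The standalone program below is an illustration only, not
part of the patch; it models the 32-bit host by using int32_t/uint32_t where
the real code has intptr_t/uintptr_t:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t addr  = 0x1234567890ULL;        /* 64-bit address */
        uint32_t umask = ~(uint32_t)(4096 - 1);  /* old-style unsigned mask */
        int32_t  smask = ~(int32_t)(4096 - 1);   /* new-style signed mask */

        /* Zero-extends to 0x00000000fffff000; prints 0x34567000,
         * the upper word of the address is lost. */
        printf("0x%llx\n", (unsigned long long)(addr & umask));

        /* Sign-extends to 0xfffffffffffff000; prints 0x1234567000,
         * the address is aligned and the upper word survives. */
        printf("0x%llx\n", (unsigned long long)(addr & smask));
        return 0;
    }
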
@@ -193,6 +242,9 @@ extern uintptr_t qemu_host_page_mask;
 /* original state of the write flag (used when tracking self-modifying
    code */
 #define PAGE_WRITE_ORG 0x0010
+/* Invalidate the TLB entry immediately, helpful for s390x
+ * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs() */
+#define PAGE_WRITE_INV 0x0040
 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
 /* FIXME: Code that sets/uses this is broken and needs to go away.  */
 #define PAGE_RESERVED 0x0020
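
PAGE_WRITE_INV is consumed by the softmmu TLB code: passed alongside
PAGE_WRITE, the freshly installed entry carries TLB_INVALID_MASK on its
write address, so it services the one store that faulted and then fails the
hit check, forcing every later write to repeat the full MMU lookup.  A
minimal sketch of a target requesting this behaviour follows;
my_fault_handler() and my_mmu_translate() are invented names, while
tlb_set_page_with_attrs(), MEMTXATTRS_UNSPECIFIED and the PAGE_* flags are
the real interfaces involved:

    /* Sketch: install a one-shot writable TLB entry, as s390x does for
     * Low-Address-Protection, where the check must run on every store. */
    static void my_fault_handler(CPUState *cs, target_ulong addr, int mmu_idx)
    {
        hwaddr phys;
        int prot;   /* PAGE_READ | PAGE_WRITE | PAGE_EXEC bits */

        my_mmu_translate(cs, addr, &phys, &prot);   /* invented helper */

        tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK,
                                MEMTXATTRS_UNSPECIFIED,
                                prot | PAGE_WRITE_INV,
                                mmu_idx, TARGET_PAGE_SIZE);
    }
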
@@ -266,62 +318,57 @@ CPUArchState *cpu_copy(CPUArchState *env);
 
 #if !defined(CONFIG_USER_ONLY)
 
-/* memory API */
-
-typedef struct RAMBlock RAMBlock;
-
-struct RAMBlock {
-    struct rcu_head rcu;
-    struct MemoryRegion *mr;
-    uint8_t *host;
-    ram_addr_t offset;
-    ram_addr_t used_length;
-    ram_addr_t max_length;
-    void (*resized)(const char*, uint64_t length, void *host);
-    uint32_t flags;
-    /* Protected by iothread lock.  */
-    char idstr[256];
-    /* RCU-enabled, writes protected by the ramlist lock */
-    QLIST_ENTRY(RAMBlock) next;
-    int fd;
-};
-
-static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
-{
-    assert(offset < block->used_length);
-    assert(block->host);
-    return (char *)block->host + offset;
-}
-
-typedef struct RAMList {
-    QemuMutex mutex;
-    /* Protected by the iothread lock.  */
-    unsigned long *dirty_memory[DIRTY_MEMORY_NUM];
-    RAMBlock *mru_block;
-    /* RCU-enabled, writes protected by the ramlist lock. */
-    QLIST_HEAD(, RAMBlock) blocks;
-    uint32_t version;
-} RAMList;
-extern RAMList ram_list;
-
 /* Flags stored in the low bits of the TLB virtual address.  These are
-   defined so that fast path ram access is all zeros.  */
+ * defined so that fast path ram access is all zeros.
+ * The flags all must be between TARGET_PAGE_BITS and
+ * maximum address alignment bit.
+ */
 /* Zero if TLB entry is valid.  */
-#define TLB_INVALID_MASK (1 << 3)
+#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS - 1))
 /* Set if TLB entry references a clean RAM page.  The iotlb entry will
    contain the page physical address.  */
-#define TLB_NOTDIRTY (1 << 4)
+#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS - 2))
 /* Set if TLB entry is an IO callback.  */
-#define TLB_MMIO (1 << 5)
+#define TLB_MMIO            (1 << (TARGET_PAGE_BITS - 3))
+/* Set if TLB entry must have MMU lookup repeated for every access */
+#define TLB_RECHECK         (1 << (TARGET_PAGE_BITS - 4))
 
-void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
-void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf);
-ram_addr_t last_ram_offset(void);
-void qemu_mutex_lock_ramlist(void);
-void qemu_mutex_unlock_ramlist(void);
+/* Use this mask to check interception with an alignment mask
+ * in a TCG backend.
+ */
+#define TLB_FLAGS_MASK  (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
+                         | TLB_RECHECK)
+
+/**
+ * tlb_hit_page: return true if page aligned @addr is a hit against the
+ * TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (must be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
+{
+    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
+}
+
+/**
+ * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (need not be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
+{
+    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
+}
+
+void dump_exec_info(void);
+void dump_opcount_info(void);
 #endif /* !CONFIG_USER_ONLY */
 
 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
-                        uint8_t *buf, int len, int is_write);
+                        uint8_t *buf, target_ulong len, int is_write);
+
+int cpu_exec(CPUState *cpu);
 
 #endif /* CPU_ALL_H */
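
A note on the reworked TLB flags: tlb_hit() and tlb_hit_page() fold the
validity test into the page-number comparison.  The compared value keeps the
TARGET_PAGE_MASK bits plus TLB_INVALID_MASK, so an entry marked invalid can
never compare equal, while TLB_NOTDIRTY, TLB_MMIO and TLB_RECHECK are masked
away and left for the slow path.  Moving the flags from fixed bit positions
(1 << 3 and up) to just below TARGET_PAGE_BITS also keeps them legal for
TARGET_PAGE_BITS_VARY targets and, per the comment above, lets a TCG backend
combine the TLB_FLAGS_MASK test with its alignment-mask compare.  Roughly
how a lookup of this QEMU generation uses the helpers (the
tlb_table/CPU_TLB_SIZE layout is the contemporary softmmu one;
probe_read_fast() itself is an invented example):

    static inline bool probe_read_fast(CPUArchState *env, target_ulong addr,
                                       int mmu_idx)
    {
        uintptr_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        CPUTLBEntry *entry = &env->tlb_table[mmu_idx][index];

        /* One compare covers both "wrong page" and "invalid entry"; a hit
         * whose address still carries TLB_NOTDIRTY, TLB_MMIO or TLB_RECHECK
         * must then be routed to the slow path. */
        return tlb_hit(entry->addr_read, addr);
    }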