#include "qemu/log.h"
-void gen_intermediate_code(CPUState *cpu, struct TranslationBlock *tb);
-void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
+void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
target_ulong *data);
void cpu_gen_init(void);
-bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
+
+/**
+ * cpu_restore_state:
+ * @cpu: the vCPU the state is to be restored to
+ * @searched_pc: the host PC the fault occurred at
+ * @will_exit: true if the TB executed will be interrupted after some
+ *             cpu adjustments. Required for maintaining the correct
+ *             icount values
+ * @return: true if state was restored, false otherwise
+ *
+ * Attempt to restore the state for a fault occurring in translated
+ * code. If the searched_pc is not in translated code, no state is
+ * restored and the function returns false.
+ */
+bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
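
/*
 * A minimal usage sketch: a helper that hits a fault restores guest
 * state from the faulting host PC before forcing an exit from the
 * current TB. demo_helper_fault is a hypothetical name; cpu_restore_state()
 * is declared above and cpu_loop_exit() is QEMU's existing TB exit path.
 */
static inline void demo_helper_fault(CPUState *cs, uintptr_t retaddr)
{
    /* The TB will be interrupted, so will_exit is true to keep the
     * icount bookkeeping consistent. */
    cpu_restore_state(cs, retaddr, true);
    cpu_loop_exit(cs);
}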
void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
/**
* cpu_address_space_init:
* @cpu: CPU to add this address space to
- * @as: address space to add
* @asidx: integer index of this address space
+ * @prefix: prefix to be used as the name of the address space
+ * @mr: the root memory region of the address space
*
* Add the specified address space to the CPU's cpu_ases list.
* The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 *
* Note that with KVM only one address space is supported.
*/
-void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
+void cpu_address_space_init(CPUState *cpu, int asidx,
+ const char *prefix, MemoryRegion *mr);
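
/*
 * A minimal usage sketch for CPU realize code. sysmem and tagmem are
 * hypothetical root MemoryRegions; per the documentation above,
 * cpu->num_ases must be set before the first call.
 */
static inline void demo_init_ases(CPUState *cs, MemoryRegion *sysmem,
                                  MemoryRegion *tagmem)
{
    cs->num_ases = 2;
    cpu_address_space_init(cs, 0, "cpu-memory", sysmem);
    cpu_address_space_init(cs, 1, "cpu-tags", tagmem);
}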
#endif
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
+/**
+ * tlb_init:
+ * @cpu: CPU whose TLB should be initialized
+ *
+ * Initialize the TLB of @cpu.
+ */
+void tlb_init(CPUState *cpu);
/**
* tlb_flush_page:
* @cpu: CPU whose TLB should be flushed
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, target_ulong size);
-void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
-void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
+void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
uintptr_t retaddr);
#else
+static inline void tlb_init(CPUState *cpu)
+{
+}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}
+
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
uint16_t idxmap)
{
}
-static inline void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
-{
-}
#endif
#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */
#define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */
#define CF_NOCACHE 0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT 0x00020000
-#define CF_INVALID 0x00040000 /* TB is stale. Setters need tb_lock */
+#define CF_INVALID 0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */
+#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
+#define CF_CLUSTER_SHIFT 24
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK \
- (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL)
+ (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL | CF_CLUSTER_MASK)
/* Per-vCPU dynamic tracing state used to generate this TB */
uint32_t trace_vcpu_dstate;
/* original tb when cflags has CF_NOCACHE */
struct TranslationBlock *orig_tb;
/* first and second physical page containing code. The lower bit
- of the pointer tells the index in page_next[] */
- struct TranslationBlock *page_next[2];
+ of the pointer tells the index in page_next[].
+ This list is protected by the lock(s) of the TB's page(s). */
+ uintptr_t page_next[2];
tb_page_addr_t page_addr[2];
+ /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
+ QemuSpin jmp_lock;
+
/* The following data are used to directly call another TB from
* the code of this one. This can be done either by emitting direct or
* indirect native jump instructions. These jumps are reset so that the TB
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
uintptr_t jmp_target_arg[2]; /* target address or offset */
- /* Each TB has an associated circular list of TBs jumping to this one.
- * jmp_list_first points to the first TB jumping to this one.
- * jmp_list_next is used to point to the next TB in a list.
- * Since each TB can have two jumps, it can participate in two lists.
- * jmp_list_first and jmp_list_next are 4-byte aligned pointers to a
- * TranslationBlock structure, but the two least significant bits of
- * them are used to encode which data field of the pointed TB should
- * be used to traverse the list further from that TB:
- * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
- * In other words, 0/1 tells which jump is used in the pointed TB,
- * and 2 means that this is a pointer back to the target TB of this list.
+ /*
+ * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
+ * Each TB can have two outgoing jumps, and therefore can participate
+ * in two lists. The list entries are kept in jmp_list_next[2]. The least
+ * significant bit (LSB) of the pointers in these lists is used to encode
+ * which of the two list entries is to be used in the pointed TB.
+ *
+ * List traversals are protected by jmp_lock. The destination TB of each
+ * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
+ * can be acquired from any origin TB.
+ *
+ * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
+ * being invalidated, so that no further outgoing jumps from it can be set.
+ *
+ * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
+ * to a destination TB that has CF_INVALID set.
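+ *
+ * (A decoding sketch for these tagged pointers follows this structure.)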
*/
+ uintptr_t jmp_list_head;
uintptr_t jmp_list_next[2];
- uintptr_t jmp_list_first;
+ uintptr_t jmp_dest[2];
};
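
/*
 * A minimal sketch of decoding one tagged pointer from the jump lists
 * documented above: the LSB selects which of the pointed TB's two list
 * entries continues the chain. demo_tb_jmp_decode is a hypothetical
 * helper, not an API declared in this header.
 */
static inline TranslationBlock *demo_tb_jmp_decode(uintptr_t entry, int *n)
{
    *n = entry & 1;                              /* which jump slot in the TB */
    return (TranslationBlock *)(entry & ~(uintptr_t)1); /* strip the tag bit */
}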
extern bool parallel_cpus;
| (use_icount ? CF_USE_ICOUNT : 0);
}
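
/*
 * A minimal sketch of how a cluster ID is folded into cflags so that
 * TBs from different clusters never match in the hash table
 * (CF_CLUSTER_MASK is part of CF_HASH_MASK above). demo_cflags_with_cluster
 * is a hypothetical helper mirroring what TB generation is expected to do.
 */
static inline uint32_t demo_cflags_with_cluster(uint32_t cflags,
                                                uint8_t cluster_index)
{
    return (cflags & ~CF_CLUSTER_MASK)
           | ((uint32_t)cluster_index << CF_CLUSTER_SHIFT);
}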
-void tb_remove(TranslationBlock *tb);
+/* TranslationBlock invalidate API */
+#if defined(CONFIG_USER_ONLY)
+void tb_invalidate_phys_addr(target_ulong addr);
+void tb_invalidate_phys_range(target_ulong start, target_ulong end);
+#else
+void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
+#endif
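
#ifdef CONFIG_USER_ONLY
/*
 * A minimal user-mode sketch: after patching guest code in place, drop
 * any TBs translated from the modified bytes. guest_pc, new_insn and len
 * are hypothetical; g2h() is the user-mode guest-to-host address macro.
 */
static inline void demo_patch_guest_code(target_ulong guest_pc,
                                         const void *new_insn, size_t len)
{
    memcpy(g2h(guest_pc), new_insn, len);
    tb_invalidate_phys_range(guest_pc, guest_pc + len);
}
#endif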
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2
-void tb_lock(void);
-void tb_unlock(void);
-void tb_lock_reset(void);
+#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
+void assert_no_pages_locked(void);
+#else
+static inline void assert_no_pages_locked(void)
+{
+}
+#endif
#if !defined(CONFIG_USER_ONLY)
-struct MemoryRegion *iotlb_to_region(CPUState *cpu,
- hwaddr index, MemTxAttrs attrs);
-
-void tlb_fill(CPUState *cpu, target_ulong addr, MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr);
-
+/**
+ * iotlb_to_section:
+ * @cpu: CPU performing the access
+ * @index: TCG CPU IOTLB entry
+ * @attrs: transaction attributes for the access
+ *
+ * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
+ * it refers to. @index will have been initially created and returned
+ * by memory_region_section_get_iotlb().
+ */
+struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
+ hwaddr index, MemTxAttrs attrs);
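
/*
 * A minimal sketch of an I/O slow path resolving an IOTLB value back to
 * the MemoryRegion behind it, assuming a CPUIOTLBEntry (addr, attrs) as
 * filled in at tlb_set_page() time. demo_iotlb_to_mr is hypothetical.
 */
static inline MemoryRegion *demo_iotlb_to_mr(CPUState *cpu,
                                             CPUIOTLBEntry *iotlbentry)
{
    MemoryRegionSection *section =
        iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    return section->mr;
}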
#endif
#if defined(CONFIG_USER_ONLY)
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
- hwaddr *xlat, hwaddr *plen);
+ hwaddr *xlat, hwaddr *plen,
+ MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
MemoryRegionSection *section,
target_ulong vaddr,
hwaddr paddr, hwaddr xlat,
int prot,
target_ulong *address);
-bool memory_region_is_unassigned(MemoryRegion *mr);
-
#endif
/* vl.c */