* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef CPU_I386_H
#define CPU_I386_H
#define ELF_MACHINE EM_386
#endif
+#define CPUState struct CPUX86State
+
#include "cpu-defs.h"
#include "softfloat.h"
#define DESC_AVL_MASK (1 << 20)
#define DESC_P_MASK (1 << 15)
#define DESC_DPL_SHIFT 13
-#define DESC_DPL_MASK (1 << DESC_DPL_SHIFT)
+#define DESC_DPL_MASK (3 << DESC_DPL_SHIFT)
#define DESC_S_MASK (1 << 12)
#define DESC_TYPE_SHIFT 8
+#define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT)
#define DESC_A_MASK (1 << 8)
#define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */
#define ID_MASK 0x00200000
/* hidden flags - used internally by qemu to represent additional cpu
- states. Only the CPL and INHIBIT_IRQ are not redundant. We avoid
- using the IOPL_MASK, TF_MASK and VM_MASK bit position to ease oring
- with eflags. */
+ states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not
+ redundant. We avoid using the IOPL_MASK, TF_MASK and VM_MASK bit
+   positions to ease OR-ing with eflags. */
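+/* Example: a CPL-3 task running in VM86 mode would have hflags that
+   include (3 << HF_CPL_SHIFT) | HF_VM_MASK; because HF_VM_SHIFT
+   matches the eflags VM bit, that bit can be OR'ed in straight from
+   eflags (see cpu_get_tb_cpu_state() at the end of this header). */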
/* current cpl */
#define HF_CPL_SHIFT 0
/* true if soft mmu is being used */
#define HF_IOPL_SHIFT 12 /* must be same as eflags */
#define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */
-#define HF_OSFXSR_SHIFT 16 /* CR4.OSFXSR */
+#define HF_RF_SHIFT 16 /* must be same as eflags */
#define HF_VM_SHIFT 17 /* must be same as eflags */
#define HF_SMM_SHIFT 19 /* CPU in SMM mode */
-#define HF_GIF_SHIFT 20 /* if set CPU takes interrupts */
-#define HF_HIF_SHIFT 21 /* shadow copy of IF_MASK when in SVM */
-#define HF_NMI_SHIFT 22 /* CPU serving NMI */
-#define HF_SVME_SHIFT 23 /* SVME enabled (copy of EFER.SVME */
-#define HF_SVMI_SHIFT 24 /* SVM intercepts are active */
+#define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */
+#define HF_SVMI_SHIFT 21 /* SVM intercepts are active */
+#define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */
#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
#define HF_MP_MASK (1 << HF_MP_SHIFT)
#define HF_EM_MASK (1 << HF_EM_SHIFT)
#define HF_TS_MASK (1 << HF_TS_SHIFT)
+#define HF_IOPL_MASK (3 << HF_IOPL_SHIFT)
#define HF_LMA_MASK (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK (1 << HF_CS64_SHIFT)
-#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
+#define HF_RF_MASK (1 << HF_RF_SHIFT)
+#define HF_VM_MASK (1 << HF_VM_SHIFT)
#define HF_SMM_MASK (1 << HF_SMM_SHIFT)
-#define HF_GIF_MASK (1 << HF_GIF_SHIFT)
-#define HF_HIF_MASK (1 << HF_HIF_SHIFT)
-#define HF_NMI_MASK (1 << HF_NMI_SHIFT)
#define HF_SVME_MASK (1 << HF_SVME_SHIFT)
#define HF_SVMI_MASK (1 << HF_SVMI_SHIFT)
+#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
+
+/* hflags2 - run-time flags that, unlike hflags, are not folded into
+   the TB flags */
+
+#define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */
+#define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */
+#define HF2_NMI_SHIFT 2 /* CPU serving NMI */
+#define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */
+
+#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
+#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
+#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
+#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
+
+#define CR0_PE_SHIFT 0
+#define CR0_MP_SHIFT 1
#define CR0_PE_MASK (1 << 0)
#define CR0_MP_MASK (1 << 1)
#define CR4_DE_MASK (1 << 3)
#define CR4_PSE_MASK (1 << 4)
#define CR4_PAE_MASK (1 << 5)
+#define CR4_MCE_MASK (1 << 6)
#define CR4_PGE_MASK (1 << 7)
#define CR4_PCE_MASK (1 << 8)
-#define CR4_OSFXSR_MASK (1 << 9)
+#define CR4_OSFXSR_SHIFT 9
+#define CR4_OSFXSR_MASK (1 << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK (1 << 10)
+#define DR6_BD (1 << 13)
+#define DR6_BS (1 << 14)
+#define DR6_BT (1 << 15)
+#define DR6_FIXED_1 0xffff0ff0
+
+#define DR7_GD (1 << 13)
+#define DR7_TYPE_SHIFT 16
+#define DR7_LEN_SHIFT 18
+#define DR7_FIXED_1 0x00000400
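+/* DR7 layout: bits 0-7 hold the local/global enable pair for each of
+   the four breakpoints (two bits per slot); from bit 16 upwards each
+   slot has a 2-bit type field followed by a 2-bit length field,
+   decoded by the hw_breakpoint_* helpers below. */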
+
#define PG_PRESENT_BIT 0
#define PG_RW_BIT 1
#define PG_USER_BIT 2
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK 0x10
+#define MCG_CTL_P (1UL<<8) /* MCG_CTL register available */
+
+#define MCE_CAP_DEF MCG_CTL_P
+#define MCE_BANKS_DEF 10
+
+#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
+
+#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
+#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
+#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
+
+#define MSR_IA32_TSC 0x10
#define MSR_IA32_APICBASE 0x1b
#define MSR_IA32_APICBASE_BSP (1<<8)
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+#define MSR_MTRRcap 0xfe
+#define MSR_MTRRcap_VCNT 8
+#define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8)
+#define MSR_MTRRcap_WC_SUPPORTED (1 << 10)
+
#define MSR_IA32_SYSENTER_CS 0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176
#define MSR_MCG_STATUS 0x17a
#define MSR_MCG_CTL 0x17b
+#define MSR_IA32_PERF_STATUS 0x198
+
+#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
+#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)
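+/* Variable-range MTRRs are base/mask MSR pairs starting at 0x200,
+   e.g. MSR_MTRRphysBase(0) == 0x200 and MSR_MTRRphysMask(0) == 0x201. */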
+
+#define MSR_MTRRfix64K_00000 0x250
+#define MSR_MTRRfix16K_80000 0x258
+#define MSR_MTRRfix16K_A0000 0x259
+#define MSR_MTRRfix4K_C0000 0x268
+#define MSR_MTRRfix4K_C8000 0x269
+#define MSR_MTRRfix4K_D0000 0x26a
+#define MSR_MTRRfix4K_D8000 0x26b
+#define MSR_MTRRfix4K_E0000 0x26c
+#define MSR_MTRRfix4K_E8000 0x26d
+#define MSR_MTRRfix4K_F0000 0x26e
+#define MSR_MTRRfix4K_F8000 0x26f
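+/* Fixed-range MTRRs: one 64KB-granularity register covers
+   0x00000-0x7FFFF, two 16KB registers cover 0x80000-0xBFFFF and eight
+   4KB registers cover 0xC0000-0xFFFFF; each packs eight memory-type
+   bytes, giving the 11 entries of mtrr_fixed[] below. */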
+
#define MSR_PAT 0x277
+#define MSR_MTRRdefType 0x2ff
+
+#define MSR_MC0_CTL 0x400
+#define MSR_MC0_STATUS 0x401
+#define MSR_MC0_ADDR 0x402
+#define MSR_MC0_MISC 0x403
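+/* Each machine check bank occupies four consecutive MSRs
+   (CTL/STATUS/ADDR/MISC), hence the MCE_BANKS_DEF * 4 sizing of
+   mce_banks[] below. */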
+
#define MSR_EFER 0xc0000080
#define MSR_EFER_SCE (1 << 0)
#define MSR_FSBASE 0xc0000100
#define MSR_GSBASE 0xc0000101
#define MSR_KERNELGSBASE 0xc0000102
+#define MSR_TSC_AUX 0xc0000103
#define MSR_VM_HSAVE_PA 0xc0010117
#define CPUID_PBE (1 << 31)
#define CPUID_EXT_SSE3 (1 << 0)
+#define CPUID_EXT_DTES64 (1 << 2)
#define CPUID_EXT_MONITOR (1 << 3)
#define CPUID_EXT_DSCPL (1 << 4)
#define CPUID_EXT_VMX (1 << 5)
#define CPUID_EXT_CID (1 << 10)
#define CPUID_EXT_CX16 (1 << 13)
#define CPUID_EXT_XTPR (1 << 14)
-#define CPUID_EXT_DCA (1 << 17)
-#define CPUID_EXT_POPCNT (1 << 22)
+#define CPUID_EXT_PDCM (1 << 15)
+#define CPUID_EXT_DCA (1 << 18)
+#define CPUID_EXT_SSE41 (1 << 19)
+#define CPUID_EXT_SSE42 (1 << 20)
+#define CPUID_EXT_X2APIC (1 << 21)
+#define CPUID_EXT_MOVBE (1 << 22)
+#define CPUID_EXT_POPCNT (1 << 23)
+#define CPUID_EXT_XSAVE (1 << 26)
+#define CPUID_EXT_OSXSAVE (1 << 27)
+#define CPUID_EXT_HYPERVISOR (1 << 31)
#define CPUID_EXT2_SYSCALL (1 << 11)
#define CPUID_EXT2_MP (1 << 19)
#define CPUID_EXT3_IBS (1 << 10)
#define CPUID_EXT3_SKINIT (1 << 12)
+#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
+#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
+#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
+
+#define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */
+#define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */
+#define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */
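+/* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order, so
+   "GenuineIntel" splits into the _1/_2/_3 constants above. */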
+
+#define CPUID_MWAIT_IBE (1 << 1) /* Interrupts can exit capability */
+#define CPUID_MWAIT_EMX (1 << 0) /* enumeration supported */
+
#define EXCP00_DIVZ 0
-#define EXCP01_SSTP 1
+#define EXCP01_DB 1
#define EXCP02_NMI 2
#define EXCP03_INT3 3
#define EXCP04_INTO 4
enum {
CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
- CC_OP_EFLAGS, /* all cc are explicitely computed, CC_SRC = flags */
+ CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */
CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
CC_OP_MULW,
uint64_t q;
} MMXReg;
-#ifdef WORDS_BIGENDIAN
+#ifdef HOST_WORDS_BIGENDIAN
#define XMM_B(n) _b[15 - (n)]
#define XMM_W(n) _w[7 - (n)]
#define XMM_L(n) _l[3 - (n)]
#endif
#define MMX_Q(n) q
+typedef union {
+#ifdef USE_X86LDOUBLE
+ CPU86_LDouble d __attribute__((aligned(16)));
+#else
+ CPU86_LDouble d;
+#endif
+ MMXReg mmx;
+} FPReg;
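+/* FPReg mirrors the hardware aliasing of each MMX register onto the
+   low 64 bits of the corresponding x87 stack register. */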
+
+typedef struct {
+ uint64_t base;
+ uint64_t mask;
+} MTRRVar;
+
+#define CPU_NB_REGS64 16
+#define CPU_NB_REGS32 8
+
#ifdef TARGET_X86_64
-#define CPU_NB_REGS 16
+#define CPU_NB_REGS CPU_NB_REGS64
#else
-#define CPU_NB_REGS 8
+#define CPU_NB_REGS CPU_NB_REGS32
#endif
#define NB_MMU_MODES 2
target_ulong cc_dst;
uint32_t cc_op;
int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
- uint32_t hflags; /* hidden flags, see HF_xxx constants */
+ uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
+ are known at translation time. */
+ uint32_t hflags2; /* various other flags, see HF2_xxx constants. */
/* segments */
SegmentCache segs[6]; /* selector values */
SegmentCache gdt; /* only base and limit are used */
SegmentCache idt; /* only base and limit are used */
- target_ulong cr[9]; /* NOTE: cr1, cr5-7 are unused */
- uint64_t a20_mask;
+ target_ulong cr[5]; /* NOTE: cr1 is unused */
+ int32_t a20_mask;
/* FPU state */
unsigned int fpstt; /* top of stack index */
- unsigned int fpus;
- unsigned int fpuc;
+ uint16_t fpus;
+ uint16_t fpuc;
uint8_t fptags[8]; /* 0 = valid, 1 = empty */
- union {
-#ifdef USE_X86LDOUBLE
- CPU86_LDouble d __attribute__((aligned(16)));
-#else
- CPU86_LDouble d;
-#endif
- MMXReg mmx;
- } fpregs[8];
+ FPReg fpregs[8];
/* emulator internal variables */
float_status fp_status;
/* sysenter registers */
uint32_t sysenter_cs;
- uint32_t sysenter_esp;
- uint32_t sysenter_eip;
+ target_ulong sysenter_esp;
+ target_ulong sysenter_eip;
uint64_t efer;
uint64_t star;
- target_phys_addr_t vm_hsave;
- target_phys_addr_t vm_vmcb;
+ uint64_t vm_hsave;
+ uint64_t vm_vmcb;
+ uint64_t tsc_offset;
uint64_t intercept;
uint16_t intercept_cr_read;
uint16_t intercept_cr_write;
uint16_t intercept_dr_read;
uint16_t intercept_dr_write;
uint32_t intercept_exceptions;
+ uint8_t v_tpr;
#ifdef TARGET_X86_64
target_ulong lstar;
target_ulong fmask;
target_ulong kernelgsbase;
#endif
+ uint64_t system_time_msr;
+ uint64_t wall_clock_msr;
+
+ uint64_t tsc;
uint64_t pat;
int exception_is_int;
target_ulong exception_next_eip;
target_ulong dr[8]; /* debug registers */
+ union {
+ CPUBreakpoint *cpu_breakpoint[4];
+ CPUWatchpoint *cpu_watchpoint[4];
+ }; /* break/watchpoints for dr[0..3] */
uint32_t smbase;
- int interrupt_request;
- int user_mode_only; /* user mode only simulation */
int old_exception; /* exception in flight */
CPU_COMMON
uint32_t cpuid_ext2_features;
uint32_t cpuid_ext3_features;
uint32_t cpuid_apic_id;
-
-#ifdef USE_KQEMU
- int kqemu_enabled;
- int last_io_time;
-#endif
+ int cpuid_vendor_override;
+
+ /* MTRRs */
+ uint64_t mtrr_fixed[11];
+ uint64_t mtrr_deftype;
+ MTRRVar mtrr_var[8];
+
+ /* For KVM */
+ uint32_t mp_state;
+ int32_t exception_injected;
+ int32_t interrupt_injected;
+ uint8_t soft_interrupt;
+ uint8_t nmi_injected;
+ uint8_t nmi_pending;
+ uint8_t has_error_code;
+ uint32_t sipi_vector;
+ uint32_t cpuid_kvm_features;
+
/* in order to simplify APIC support, we leave this pointer to the
user */
struct APICState *apic_state;
+
+    uint64_t mcg_cap;
+    uint64_t mcg_status;
+    uint64_t mcg_ctl;
+    uint64_t mce_banks[MCE_BANKS_DEF*4];
+
+ uint64_t tsc_aux;
+
+ /* vmstate */
+ uint16_t fpus_vmstate;
+ uint16_t fptag_vmstate;
+ uint16_t fpregs_format_vmstate;
} CPUX86State;
CPUX86State *cpu_x86_init(const char *cpu_model);
int cpu_x86_exec(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);
-void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt,
- ...));
+void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
+ const char *optarg);
+void x86_cpudef_setup(void);
+
int cpu_get_pic_interrupt(CPUX86State *s);
/* MSDOS compatibility mode FPU exception support */
void cpu_set_ferr(CPUX86State *s);
}
}
+int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
+ target_ulong *base, unsigned int *limit,
+ unsigned int *flags);
+
/* wrapper, just in case memory mappings must be changed */
static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
{
#endif
}
+/* op_helper.c */
/* used for debug or cpu save/restore */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f);
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper);
+/* cpu-exec.c */
/* the following helpers are only usable in user mode simulation as
they can trigger unexpected exceptions */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
is returned if the signal was handled by the virtual CPU. */
int cpu_x86_signal_handler(int host_signum, void *pinfo,
void *puc);
+
+/* cpuid.c */
+void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx);
+int cpu_x86_register (CPUX86State *env, const char *cpu_model);
+
+/* helper.c */
+int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
+ int is_write, int mmu_idx, int is_softmmu);
+#define cpu_handle_mmu_fault cpu_x86_handle_mmu_fault
void cpu_x86_set_a20(CPUX86State *env, int a20_state);
-uint64_t cpu_get_tsc(CPUX86State *env);
+static inline int hw_breakpoint_enabled(unsigned long dr7, int index)
+{
+ return (dr7 >> (index * 2)) & 3;
+}
+
+static inline int hw_breakpoint_type(unsigned long dr7, int index)
+{
+ return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
+}
+
+static inline int hw_breakpoint_len(unsigned long dr7, int index)
+{
+ int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
+ return (len == 2) ? 8 : len + 1;
+}
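+/* Illustrative example (values chosen for this comment, not taken from
+   real guest state): dr7 = 0x000d0401 sets DR7_FIXED_1, enables
+   breakpoint 0 locally and gives its control nibble type 1, length 3:
+       hw_breakpoint_enabled(0x000d0401, 0) -> 1  (L0 set)
+       hw_breakpoint_type(0x000d0401, 0)    -> 1  (data write)
+       hw_breakpoint_len(0x000d0401, 0)     -> 4  (len code 3 -> 4 bytes)
+   Len code 2 maps to 8 bytes, an x86-64-only encoding. */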
+
+void hw_breakpoint_insert(CPUX86State *env, int index);
+void hw_breakpoint_remove(CPUX86State *env, int index);
+int check_hw_breakpoints(CPUX86State *env, int force_dr6_update);
+/* will be suppressed */
+void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
+void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
+void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
+
+/* hw/apic.c */
void cpu_set_apic_base(CPUX86State *env, uint64_t val);
uint64_t cpu_get_apic_base(CPUX86State *env);
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val);
#ifndef NO_CPU_IO_DEFS
uint8_t cpu_get_apic_tpr(CPUX86State *env);
#endif
-void cpu_smm_update(CPUX86State *env);
-/* will be suppressed */
-void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
+/* hw/pc.c */
+void cpu_smm_update(CPUX86State *env);
+uint64_t cpu_get_tsc(CPUX86State *env);
/* used to debug */
#define X86_DUMP_FPU 0x0001 /* dump FPU state too */
#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */
-#ifdef USE_KQEMU
-static inline int cpu_get_time_fast(void)
-{
- int low, high;
- asm volatile("rdtsc" : "=a" (low), "=d" (high));
- return low;
-}
-#endif
-
#define TARGET_PAGE_BITS 12
-#define CPUState CPUX86State
+#ifdef TARGET_X86_64
+#define TARGET_PHYS_ADDR_SPACE_BITS 52
+/* ??? This is really 48 bits, sign-extended, but the only thing
+ accessible to userland with bit 48 set is the VSYSCALL, and that
+ is handled via other mechanisms. */
+#define TARGET_VIRT_ADDR_SPACE_BITS 47
+#else
+#define TARGET_PHYS_ADDR_SPACE_BITS 36
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#endif
+
#define cpu_init cpu_x86_init
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler
-#define cpu_list x86_cpu_list
+#define cpu_list_id x86_cpu_list
+#define cpudef_setup x86_cpudef_setup
+
+#define CPU_SAVE_VERSION 11
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
}
+/* translate.c */
void optimize_flags_init(void);
typedef struct CCTable {
int (*compute_c)(void); /* return the C flag */
} CCTable;
-extern CCTable cc_table[];
-
#if defined(CONFIG_USER_ONLY)
static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
{
#endif
#include "cpu-all.h"
+#include "exec-all.h"
#include "svm.h"
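+/* tb->pc is the linear address the TB was translated at (CS base +
+   EIP), so EIP is recovered by subtracting the CS base. */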
+static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
+{
+ env->eip = tb->pc - tb->cs_base;
+}
+
+static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
+ target_ulong *cs_base, int *flags)
+{
+ *cs_base = env->segs[R_CS].base;
+ *pc = *cs_base + env->eip;
+ *flags = env->hflags |
+ (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK));
+}
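+/* The eflags bits OR'ed into *flags here sit at positions the HF_*
+   constants deliberately leave free (see the hidden-flags comment near
+   the top of this file), so both can share one TB flags word. */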
+
+void apic_init_reset(CPUState *env);
+void apic_sipi(CPUState *env);
+void do_cpu_init(CPUState *env);
+void do_cpu_sipi(CPUState *env);
#endif /* CPU_I386_H */