#define CPUArchState struct CPUX86State
-#include "cpu-defs.h"
+#include "exec/cpu-defs.h"
-#include "softfloat.h"
+#include "fpu/softfloat.h"
#define R_EAX 0
#define R_ECX 1
/* hidden flags - used internally by qemu to represent additional cpu
states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not
- redundant. We avoid using the IOPL_MASK, TF_MASK and VM_MASK bit
- position to ease oring with eflags. */
+ redundant. We avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK
+ bit positions to ease ORing with eflags. */
/* current cpl */
#define HF_CPL_SHIFT 0
/* true if soft mmu is being used */
#define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */
#define HF_RF_SHIFT 16 /* must be same as eflags */
#define HF_VM_SHIFT 17 /* must be same as eflags */
+#define HF_AC_SHIFT 18 /* must be same as eflags */
#define HF_SMM_SHIFT 19 /* CPU in SMM mode */
#define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */
#define HF_SVMI_SHIFT 21 /* SVM intercepts are active */
#define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */
+#define HF_SMAP_SHIFT 23 /* CR4.SMAP */
#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
#define HF_CS64_MASK (1 << HF_CS64_SHIFT)
#define HF_RF_MASK (1 << HF_RF_SHIFT)
#define HF_VM_MASK (1 << HF_VM_SHIFT)
+#define HF_AC_MASK (1 << HF_AC_SHIFT)
#define HF_SMM_MASK (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK (1 << HF_SVME_SHIFT)
#define HF_SVMI_MASK (1 << HF_SVMI_SHIFT)
#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
+#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT)
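
Because HF_RF, HF_VM and HF_AC sit at the same bit positions as their eflags
counterparts, consumers can merge the two words with a single OR and no
shifting, as the flags-computation hunk near the end of this patch does:

    /* Sketch: matching bit positions make the merge a plain OR */
    flags = env->hflags |
        (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
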
/* hflags2 */
#define CR4_OSFXSR_SHIFT 9
#define CR4_OSFXSR_MASK (1 << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK (1 << 10)
+#define CR4_VMXE_MASK (1 << 13)
+#define CR4_SMXE_MASK (1 << 14)
+#define CR4_FSGSBASE_MASK (1 << 16)
+#define CR4_PCIDE_MASK (1 << 17)
+#define CR4_OSXSAVE_MASK (1 << 18)
+#define CR4_SMEP_MASK (1 << 20)
+#define CR4_SMAP_MASK (1 << 21)
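
HF_SMAP_SHIFT above caches CR4.SMAP in hflags so the fast path never has to
read CR4 itself. A minimal sketch of keeping that cache coherent, assuming it
is refreshed on the CR4 write path (e.g. in cpu_x86_update_cr4()):

    /* Sketch (assumed placement): mirror CR4.SMAP into the hflags cache */
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    } else {
        env->hflags &= ~HF_SMAP_MASK;
    }
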
#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
#define DR7_TYPE_SHIFT 16
#define DR7_LEN_SHIFT 18
#define DR7_FIXED_1 0x00000400
+#define DR7_LOCAL_BP_MASK 0x55
+#define DR7_MAX_BP 4
+#define DR7_TYPE_BP_INST 0x0
+#define DR7_TYPE_DATA_WR 0x1
+#define DR7_TYPE_IO_RW 0x2
+#define DR7_TYPE_DATA_RW 0x3
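
Each of the DR7_MAX_BP breakpoints owns a 2-bit type field (one of the
DR7_TYPE_* values) and a 2-bit length field, packed as a 4-bit group starting
at DR7_TYPE_SHIFT. A sketch of the decode these shifts imply, consistent with
the hw_breakpoint_type() helper whose signature appears further down:

    /* Sketch: extract the R/W type bits for breakpoint 'index' */
    static inline int hw_breakpoint_type(unsigned long dr7, int index)
    {
        return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
    }
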
#define PG_PRESENT_BIT 0
#define PG_RW_BIT 1
#define MSR_IA32_APICBASE_BSP (1<<8)
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+#define MSR_TSC_ADJUST 0x0000003b
#define MSR_IA32_TSCDEADLINE 0x6e0
#define MSR_MTRRcap 0xfe
#define MSR_VM_HSAVE_PA 0xc0010117
+/* CPUID feature words */
+typedef enum FeatureWord {
+ FEAT_1_EDX, /* CPUID[1].EDX */
+ FEAT_1_ECX, /* CPUID[1].ECX */
+ FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */
+ FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
+ FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
+ FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
+ FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
+ FEAT_SVM, /* CPUID[8000_000A].EDX */
+ FEATURE_WORDS,
+} FeatureWord;
+
+typedef uint32_t FeatureWordArray[FEATURE_WORDS];
+
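The enum gives every tracked CPUID leaf one slot in a FeatureWordArray, so
feature manipulation becomes plain array indexing. A hypothetical usage
sketch (the variable name is illustrative):

    FeatureWordArray plus_features = { 0 };
    plus_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_SMEP;  /* request SMEP */
    if (plus_features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMEP) {
        /* ... */
    }
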
/* cpuid_features bits */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME (1 << 1)
#define CPUID_EXT_TM2 (1 << 8)
#define CPUID_EXT_SSSE3 (1 << 9)
#define CPUID_EXT_CID (1 << 10)
+#define CPUID_EXT_FMA (1 << 12)
#define CPUID_EXT_CX16 (1 << 13)
#define CPUID_EXT_XTPR (1 << 14)
#define CPUID_EXT_PDCM (1 << 15)
+#define CPUID_EXT_PCID (1 << 17)
#define CPUID_EXT_DCA (1 << 18)
#define CPUID_EXT_SSE41 (1 << 19)
#define CPUID_EXT_SSE42 (1 << 20)
#define CPUID_EXT_XSAVE (1 << 26)
#define CPUID_EXT_OSXSAVE (1 << 27)
#define CPUID_EXT_AVX (1 << 28)
+#define CPUID_EXT_F16C (1 << 29)
+#define CPUID_EXT_RDRAND (1 << 30)
#define CPUID_EXT_HYPERVISOR (1 << 31)
#define CPUID_EXT2_FPU (1 << 0)
+#define CPUID_EXT2_VME (1 << 1)
#define CPUID_EXT2_DE (1 << 2)
#define CPUID_EXT2_PSE (1 << 3)
#define CPUID_EXT2_TSC (1 << 4)
#define CPUID_EXT2_3DNOWEXT (1 << 30)
#define CPUID_EXT2_3DNOW (1 << 31)
+/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
+#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
+ CPUID_EXT2_DE | CPUID_EXT2_PSE | \
+ CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
+ CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
+ CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
+ CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
+ CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
+ CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
+ CPUID_EXT2_MMX | CPUID_EXT2_FXSR)
+
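On AMD CPUs these CPUID[8000_0001].EDX bits must mirror their CPUID[1].EDX
sources, and the combined mask lets setup code do that in one step. A sketch,
assuming the leaf-1 EDX word lives in env->cpuid_features alongside the
cpuid_ext2_features field seen later in this patch:

    /* Sketch: keep the alias bits equal to their CPUID[1].EDX sources */
    env->cpuid_ext2_features &= ~CPUID_EXT2_AMD_ALIASES;
    env->cpuid_ext2_features |= env->cpuid_features & CPUID_EXT2_AMD_ALIASES;
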
#define CPUID_EXT3_LAHF_LM (1 << 0)
#define CPUID_EXT3_CMP_LEG (1 << 1)
#define CPUID_EXT3_SVM (1 << 2)
#define CPUID_EXT3_IBS (1 << 10)
#define CPUID_EXT3_XOP (1 << 11)
#define CPUID_EXT3_SKINIT (1 << 12)
+#define CPUID_EXT3_WDT (1 << 13)
+#define CPUID_EXT3_LWP (1 << 15)
#define CPUID_EXT3_FMA4 (1 << 16)
+#define CPUID_EXT3_TCE (1 << 17)
+#define CPUID_EXT3_NODEID (1 << 19)
+#define CPUID_EXT3_TBM (1 << 21)
+#define CPUID_EXT3_TOPOEXT (1 << 22)
+#define CPUID_EXT3_PERFCORE (1 << 23)
+#define CPUID_EXT3_PERFNB (1 << 24)
#define CPUID_SVM_NPT (1 << 0)
#define CPUID_SVM_LBRV (1 << 1)
#define CPUID_SVM_PAUSEFILTER (1 << 10)
#define CPUID_SVM_PFTHRESHOLD (1 << 12)
+#define CPUID_7_0_EBX_FSGSBASE (1 << 0)
+#define CPUID_7_0_EBX_BMI1 (1 << 3)
+#define CPUID_7_0_EBX_HLE (1 << 4)
+#define CPUID_7_0_EBX_AVX2 (1 << 5)
+#define CPUID_7_0_EBX_SMEP (1 << 7)
+#define CPUID_7_0_EBX_BMI2 (1 << 8)
+#define CPUID_7_0_EBX_ERMS (1 << 9)
+#define CPUID_7_0_EBX_INVPCID (1 << 10)
+#define CPUID_7_0_EBX_RTM (1 << 11)
+#define CPUID_7_0_EBX_RDSEED (1 << 18)
+#define CPUID_7_0_EBX_ADX (1 << 19)
+#define CPUID_7_0_EBX_SMAP (1 << 20)
+
+#define CPUID_VENDOR_SZ 12
+
#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
+#define CPUID_VENDOR_INTEL "GenuineIntel"
#define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */
#define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */
#define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */
+#define CPUID_VENDOR_AMD "AuthenticAMD"
-#define CPUID_VENDOR_VIA_1 0x746e6543 /* "Cent" */
-#define CPUID_VENDOR_VIA_2 0x48727561 /* "aurH" */
-#define CPUID_VENDOR_VIA_3 0x736c7561 /* "auls" */
+#define CPUID_VENDOR_VIA "CentaurHauls"
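
With CPUID_VENDOR_SZ and the full strings available, the per-register
constants can be derived rather than hard-coded. A sketch of packing a
12-byte vendor string into the EBX/EDX/ECX values that CPUID leaf 0 returns
(variable names are illustrative):

    const char *v = CPUID_VENDOR_INTEL;
    uint32_t vebx = 0, vedx = 0, vecx = 0;
    for (int i = 0; i < 4; i++) {
        vebx |= (uint32_t)(uint8_t)v[i]     << (8 * i);  /* "Genu" */
        vedx |= (uint32_t)(uint8_t)v[i + 4] << (8 * i);  /* "ineI" */
        vecx |= (uint32_t)(uint8_t)v[i + 8] << (8 * i);  /* "ntel" */
    }
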
#define CPUID_MWAIT_IBE (1 << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX (1 << 0) /* enumeration supported */
#define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_3
-enum {
+typedef enum {
CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */
CC_OP_SARQ,
CC_OP_NB,
-};
+} CCOp;
typedef struct SegmentCache {
uint32_t selector;
#define CPU_NB_REGS CPU_NB_REGS32
#endif
-#define NB_MMU_MODES 2
+#define NB_MMU_MODES 3
typedef enum TPRAccess {
TPR_ACCESS_READ,
XMMReg xmm_regs[CPU_NB_REGS];
XMMReg xmm_t0;
MMXReg mmx_t0;
- target_ulong cc_tmp; /* temporary for rcr/rcl */
/* sysenter registers */
uint32_t sysenter_cs;
uint64_t pv_eoi_en_msr;
uint64_t tsc;
+ uint64_t tsc_adjust;
uint64_t tsc_deadline;
uint64_t mcg_status;
uint32_t cpuid_ext2_features;
uint32_t cpuid_ext3_features;
uint32_t cpuid_apic_id;
- int cpuid_vendor_override;
/* Store the results of Centaur's CPUID instructions */
uint32_t cpuid_xlevel2;
uint32_t cpuid_ext4_features;
/* Flags from CPUID[EAX=7,ECX=0].EBX */
- uint32_t cpuid_7_0_ebx;
+ uint32_t cpuid_7_0_ebx_features;
/* MTRRs */
uint64_t mtrr_fixed[11];
}
}
-static inline void cpu_x86_load_seg_cache_sipi(CPUX86State *env,
+static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
int sipi_vector)
{
+ CPUX86State *env = &cpu->env;
+
env->eip = 0;
cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
sipi_vector << 12,
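
The shifts implement the real-mode SIPI start-up convention: a SIPI vector of
0x12, for instance, loads CS selector 0x1200 (0x12 << 8) with base 0x12000
(0x12 << 12), so with eip cleared the AP begins execution at physical address
0x12000.
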
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx);
-int cpu_x86_register(X86CPU *cpu, const char *cpu_model);
void cpu_clear_apic_feature(CPUX86State *env);
void host_cpuid(uint32_t function, uint32_t count,
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
int is_write, int mmu_idx);
#define cpu_handle_mmu_fault cpu_x86_handle_mmu_fault
-void cpu_x86_set_a20(CPUX86State *env, int a20_state);
+void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
-static inline int hw_breakpoint_enabled(unsigned long dr7, int index)
+static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
{
- return (dr7 >> (index * 2)) & 3;
+ return (dr7 >> (index * 2)) & 1;
+}
+
+static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index)
+{
+    return (dr7 >> (index * 2)) & 2;
+}
+
+static inline bool hw_breakpoint_enabled(unsigned long dr7, int index)
+{
+ return hw_global_breakpoint_enabled(dr7, index) ||
+           hw_local_breakpoint_enabled(dr7, index);
}
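
In DR7, bit 2n is the local enable and bit 2n+1 the global enable for
breakpoint n, which is also why DR7_LOCAL_BP_MASK above is 0x55 (the four
local-enable bits). A usage sketch, assuming the usual env->dr[] register
array:

    /* Sketch: scan the debug registers for armed breakpoints */
    for (int index = 0; index < DR7_MAX_BP; index++) {
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            /* ... sync the corresponding QEMU breakpoint ... */
        }
    }
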
static inline int hw_breakpoint_type(unsigned long dr7, int index)
void hw_breakpoint_insert(CPUX86State *env, int index);
void hw_breakpoint_remove(CPUX86State *env, int index);
-int check_hw_breakpoints(CPUX86State *env, int force_dr6_update);
+bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update);
void breakpoint_handler(CPUX86State *env);
/* will be suppressed */
void cpu_smm_update(CPUX86State *env);
uint64_t cpu_get_tsc(CPUX86State *env);
-/* used to debug */
-#define X86_DUMP_FPU 0x0001 /* dump FPU state too */
-#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */
-
#define TARGET_PAGE_BITS 12
#ifdef TARGET_X86_64
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
-#define MMU_USER_IDX 1
+#define MMU_MODE2_SUFFIX _ksmap /* Kernel with SMAP override */
+#define MMU_KERNEL_IDX 0
+#define MMU_USER_IDX 1
+#define MMU_KSMAP_IDX 2
static inline int cpu_mmu_index (CPUX86State *env)
{
- return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
+ return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
+ ((env->hflags & HF_SMAP_MASK) && (env->eflags & AC_MASK))
+ ? MMU_KSMAP_IDX : MMU_KERNEL_IDX;
}
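
The effect: CPL 3 always gets the user index, while kernel mode is diverted
to the new _ksmap index only when CR4.SMAP is cached in hflags and EFLAGS.AC
is set, so SMAP-overridden accesses get their own softmmu TLB. A sketch of
the three cases, assuming an otherwise zeroed env:

    env->hflags = 3 << HF_CPL_SHIFT;          /* CPL 3              */
    assert(cpu_mmu_index(env) == MMU_USER_IDX);

    env->hflags = HF_SMAP_MASK;               /* CPL 0, SMAP, AC=1  */
    env->eflags |= AC_MASK;
    assert(cpu_mmu_index(env) == MMU_KSMAP_IDX);

    env->eflags &= ~AC_MASK;                  /* CPL 0, SMAP, AC=0  */
    assert(cpu_mmu_index(env) == MMU_KERNEL_IDX);
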
#undef EAX
}
#endif
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
#include "svm.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/apic.h"
#endif
-static inline bool cpu_has_work(CPUX86State *env)
+static inline bool cpu_has_work(CPUState *cpu)
{
+ CPUX86State *env = &X86_CPU(cpu)->env;
+
return ((env->interrupt_request & (CPU_INTERRUPT_HARD |
CPU_INTERRUPT_POLL)) &&
(env->eflags & IF_MASK)) ||
CPU_INTERRUPT_MCE));
}
-#include "exec-all.h"
+#include "exec/exec-all.h"
static inline void cpu_pc_from_tb(CPUX86State *env, TranslationBlock *tb)
{
*cs_base = env->segs[R_CS].base;
*pc = *cs_base + env->eip;
*flags = env->hflags |
- (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK));
+ (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
}
void do_cpu_init(X86CPU *cpu);
#define MCE_INJECT_BROADCAST 1
#define MCE_INJECT_UNCOND_AO 2
-void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
+void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
uint64_t status, uint64_t mcg_status, uint64_t addr,
uint64_t misc, int flags);
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
+void disable_kvm_pv_eoi(void);
+
+/* Return name of 32-bit register, from a R_* constant */
+const char *get_register_name_32(unsigned int reg);
+
+uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index);
+void enable_compat_apic_id_mode(void);
+
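get_register_name_32() gives callers a way to print the R_* indices defined
at the top of this header; a hypothetical usage sketch (the returned
spelling, e.g. "eax", is an assumption):

    fprintf(stderr, "warning: unsupported bits in %s\n",
            get_register_name_32(R_EAX));
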
#endif /* CPU_I386_H */