#define TARGET_HAS_ICE 1
#ifdef TARGET_X86_64
-#define ELF_MACHINE EM_X86_64
+#define ELF_MACHINE     EM_X86_64
#else
-#define ELF_MACHINE EM_386
+#define ELF_MACHINE     EM_386
#endif
#define CPUArchState struct CPUX86State
#define DESC_TSS_BUSY_MASK (1 << 9)
/* eflags masks */
-#define CC_C 0x0001
-#define CC_P 0x0004
-#define CC_A 0x0010
-#define CC_Z 0x0040
+#define CC_C    0x0001
+#define CC_P    0x0004
+#define CC_A    0x0010
+#define CC_Z    0x0040
#define CC_S 0x0080
#define CC_O 0x0800
#define IOPL_SHIFT 12
#define VM_SHIFT 17
-#define TF_MASK 0x00000100
-#define IF_MASK 0x00000200
-#define DF_MASK 0x00000400
-#define IOPL_MASK 0x00003000
-#define NT_MASK 0x00004000
-#define RF_MASK 0x00010000
-#define VM_MASK 0x00020000
-#define AC_MASK 0x00040000
+#define TF_MASK                 0x00000100
+#define IF_MASK                 0x00000200
+#define DF_MASK                 0x00000400
+#define IOPL_MASK               0x00003000
+#define NT_MASK                 0x00004000
+#define RF_MASK                 0x00010000
+#define VM_MASK                 0x00020000
+#define AC_MASK                 0x00040000
#define VIF_MASK 0x00080000
#define VIP_MASK 0x00100000
#define ID_MASK 0x00200000
#define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */
#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
-#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
+#define HF2_HIF_MASK            (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
#define DR7_TYPE_IO_RW 0x2
#define DR7_TYPE_DATA_RW 0x3
-#define PG_PRESENT_BIT 0
-#define PG_RW_BIT 1
-#define PG_USER_BIT 2
-#define PG_PWT_BIT 3
-#define PG_PCD_BIT 4
-#define PG_ACCESSED_BIT 5
-#define PG_DIRTY_BIT 6
-#define PG_PSE_BIT 7
-#define PG_GLOBAL_BIT 8
-#define PG_NX_BIT 63
+#define PG_PRESENT_BIT  0
+#define PG_RW_BIT       1
+#define PG_USER_BIT     2
+#define PG_PWT_BIT      3
+#define PG_PCD_BIT      4
+#define PG_ACCESSED_BIT 5
+#define PG_DIRTY_BIT    6
+#define PG_PSE_BIT      7
+#define PG_GLOBAL_BIT   8
+#define PG_NX_BIT       63
#define PG_PRESENT_MASK (1 << PG_PRESENT_BIT)
-#define PG_RW_MASK (1 << PG_RW_BIT)
-#define PG_USER_MASK (1 << PG_USER_BIT)
-#define PG_PWT_MASK (1 << PG_PWT_BIT)
-#define PG_PCD_MASK (1 << PG_PCD_BIT)
+#define PG_RW_MASK       (1 << PG_RW_BIT)
+#define PG_USER_MASK     (1 << PG_USER_BIT)
+#define PG_PWT_MASK      (1 << PG_PWT_BIT)
+#define PG_PCD_MASK      (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
-#define PG_DIRTY_MASK (1 << PG_DIRTY_BIT)
-#define PG_PSE_MASK (1 << PG_PSE_BIT)
-#define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT)
+#define PG_DIRTY_MASK    (1 << PG_DIRTY_BIT)
+#define PG_PSE_MASK      (1 << PG_PSE_BIT)
+#define PG_GLOBAL_MASK   (1 << PG_GLOBAL_BIT)
#define PG_HI_USER_MASK 0x7ff0000000000000LL
-#define PG_NX_MASK (1LL << PG_NX_BIT)
+#define PG_NX_MASK       (1LL << PG_NX_BIT)
#define PG_ERROR_W_BIT 1
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK 0x10
-#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */
-#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
+#define MCG_CTL_P       (1ULL<<8)   /* MCG_CAP register available */
+#define MCG_SER_P       (1ULL<<24)  /* MCA recovery/new status bits */
-#define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P)
-#define MCE_BANKS_DEF 10
+#define MCE_CAP_DEF     (MCG_CTL_P|MCG_SER_P)
+#define MCE_BANKS_DEF   10
-#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
-#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
-#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
+#define MCG_STATUS_RIPV (1ULL<<0)   /* restart ip valid */
+#define MCG_STATUS_EIPV (1ULL<<1)   /* ip points to correct instruction */
+#define MCG_STATUS_MCIP (1ULL<<2)   /* machine check in progress */
-#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
-#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
-#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
-#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
-#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
-#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
-#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
-#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
-#define MCI_STATUS_AR (1ULL<<55) /* Action required */
+#define MCI_STATUS_VAL   (1ULL<<63)  /* valid error */
+#define MCI_STATUS_OVER  (1ULL<<62)  /* previous errors lost */
+#define MCI_STATUS_UC    (1ULL<<61)  /* uncorrected error */
+#define MCI_STATUS_EN    (1ULL<<60)  /* error enabled */
+#define MCI_STATUS_MISCV (1ULL<<59)  /* misc error reg. valid */
+#define MCI_STATUS_ADDRV (1ULL<<58)  /* addr reg. valid */
+#define MCI_STATUS_PCC   (1ULL<<57)  /* processor context corrupt */
+#define MCI_STATUS_S     (1ULL<<56)  /* Signaled machine check */
+#define MCI_STATUS_AR    (1ULL<<55)  /* Action required */
/* MISC register defines */
-#define MCM_ADDR_SEGOFF 0 /* segment offset */
-#define MCM_ADDR_LINEAR 1 /* linear address */
-#define MCM_ADDR_PHYS 2 /* physical address */
-#define MCM_ADDR_MEM 3 /* memory address */
-#define MCM_ADDR_GENERIC 7 /* generic */
+#define MCM_ADDR_SEGOFF  0      /* segment offset */
+#define MCM_ADDR_LINEAR  1      /* linear address */
+#define MCM_ADDR_PHYS    2      /* physical address */
+#define MCM_ADDR_MEM     3      /* memory address */
+#define MCM_ADDR_GENERIC 7      /* generic */
#define MSR_IA32_TSC 0x10
#define MSR_IA32_APICBASE 0x1b
#define MSR_IA32_APICBASE_BSP (1<<8)
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+#define MSR_IA32_FEATURE_CONTROL 0x0000003a
#define MSR_TSC_ADJUST 0x0000003b
#define MSR_IA32_TSCDEADLINE 0x6e0
-#define MSR_MTRRcap 0xfe
-#define MSR_MTRRcap_VCNT 8
-#define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8)
-#define MSR_MTRRcap_WC_SUPPORTED (1 << 10)
+#define MSR_P6_PERFCTR0                 0xc1
+
+#define MSR_MTRRcap                     0xfe
+#define MSR_MTRRcap_VCNT                8
+#define MSR_MTRRcap_FIXRANGE_SUPPORT    (1 << 8)
+#define MSR_MTRRcap_WC_SUPPORTED        (1 << 10)
#define MSR_IA32_SYSENTER_CS 0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_MCG_STATUS 0x17a
#define MSR_MCG_CTL 0x17b
+#define MSR_P6_EVNTSEL0 0x186
+
#define MSR_IA32_PERF_STATUS 0x198
-#define MSR_IA32_MISC_ENABLE 0x1a0
+#define MSR_IA32_MISC_ENABLE            0x1a0
/* Indicates good rep/movs microcode on some processors: */
#define MSR_IA32_MISC_ENABLE_DEFAULT 1
-#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
-#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)
-
-#define MSR_MTRRfix64K_00000 0x250
-#define MSR_MTRRfix16K_80000 0x258
-#define MSR_MTRRfix16K_A0000 0x259
-#define MSR_MTRRfix4K_C0000 0x268
-#define MSR_MTRRfix4K_C8000 0x269
-#define MSR_MTRRfix4K_D0000 0x26a
-#define MSR_MTRRfix4K_D8000 0x26b
-#define MSR_MTRRfix4K_E0000 0x26c
-#define MSR_MTRRfix4K_E8000 0x26d
-#define MSR_MTRRfix4K_F0000 0x26e
-#define MSR_MTRRfix4K_F8000 0x26f
+#define MSR_MTRRphysBase(reg)           (0x200 + 2 * (reg))
+#define MSR_MTRRphysMask(reg)           (0x200 + 2 * (reg) + 1)
+
+#define MSR_MTRRfix64K_00000            0x250
+#define MSR_MTRRfix16K_80000            0x258
+#define MSR_MTRRfix16K_A0000            0x259
+#define MSR_MTRRfix4K_C0000             0x268
+#define MSR_MTRRfix4K_C8000             0x269
+#define MSR_MTRRfix4K_D0000             0x26a
+#define MSR_MTRRfix4K_D8000             0x26b
+#define MSR_MTRRfix4K_E0000             0x26c
+#define MSR_MTRRfix4K_E8000             0x26d
+#define MSR_MTRRfix4K_F0000             0x26e
+#define MSR_MTRRfix4K_F8000             0x26f
#define MSR_PAT 0x277
-#define MSR_MTRRdefType 0x2ff
+#define MSR_MTRRdefType                 0x2ff
-#define MSR_MC0_CTL 0x400
-#define MSR_MC0_STATUS 0x401
-#define MSR_MC0_ADDR 0x402
-#define MSR_MC0_MISC 0x403
+#define MSR_CORE_PERF_FIXED_CTR0        0x309
+#define MSR_CORE_PERF_FIXED_CTR1        0x30a
+#define MSR_CORE_PERF_FIXED_CTR2        0x30b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL    0x38d
+#define MSR_CORE_PERF_GLOBAL_STATUS     0x38e
+#define MSR_CORE_PERF_GLOBAL_CTRL       0x38f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL   0x390
+
+#define MSR_MC0_CTL                     0x400
+#define MSR_MC0_STATUS                  0x401
+#define MSR_MC0_ADDR                    0x402
+#define MSR_MC0_MISC                    0x403
#define MSR_EFER 0xc0000080
#define CPUID_MWAIT_IBE (1 << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX (1 << 0) /* enumeration supported */
+#ifndef HYPERV_SPINLOCK_NEVER_RETRY
+#define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF
+#endif
+
#define EXCP00_DIVZ 0
#define EXCP01_DB 1
#define EXCP02_NMI 2
#define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_3
-enum {
+typedef enum {
CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */
CC_OP_SARL,
CC_OP_SARQ,
+ CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */
+ CC_OP_BMILGW,
+ CC_OP_BMILGL,
+ CC_OP_BMILGQ,
+
+ CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */
+ CC_OP_ADOX, /* CC_DST = O, CC_SRC = rest. */
+ CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */
+
+ CC_OP_CLR, /* Z set, all other flags clear. */
+
CC_OP_NB,
-};
+} CCOp;
typedef struct SegmentCache {
uint32_t selector;
#define CPU_NB_REGS CPU_NB_REGS32
#endif
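+/* Counter limits follow from the MSRs defined above: three fixed-function
+ * counters (MSR_CORE_PERF_FIXED_CTR0..2), and 0x198 (MSR_IA32_PERF_STATUS)
+ * minus 0x186 (MSR_P6_EVNTSEL0) = 18 possible general-purpose counter slots. */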
+#define MAX_FIXED_COUNTERS 3
+#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
+
#define NB_MMU_MODES 3
typedef enum TPRAccess {
stored elsewhere */
/* emulator internal eflags handling */
- target_ulong cc_src;
target_ulong cc_dst;
+ target_ulong cc_src;
+ target_ulong cc_src2;
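+ /* cc_src2 supplies the extra operand used by the three-input flag modes,
+ e.g. CC_OP_ADCOX above keeps O here while C lives in cc_dst. */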
uint32_t cc_op;
int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
XMMReg xmm_regs[CPU_NB_REGS];
XMMReg xmm_t0;
MMXReg mmx_t0;
- target_ulong cc_tmp; /* temporary for rcr/rcl */
/* sysenter registers */
uint32_t sysenter_cs;
#endif
uint64_t system_time_msr;
uint64_t wall_clock_msr;
+ uint64_t steal_time_msr;
uint64_t async_pf_en_msr;
uint64_t pv_eoi_en_msr;
uint64_t mcg_status;
uint64_t msr_ia32_misc_enable;
+ uint64_t msr_ia32_feature_control;
+
+ uint64_t msr_fixed_ctr_ctrl;
+ uint64_t msr_global_ctrl;
+ uint64_t msr_global_status;
+ uint64_t msr_global_ovf_ctrl;
+ uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS];
+ uint64_t msr_gp_counters[MAX_GP_COUNTERS];
+ uint64_t msr_gp_evtsel[MAX_GP_COUNTERS];
/* exception/interrupt handling */
int error_code;
/* processor features (e.g. for CPUID insn) */
uint32_t cpuid_level;
+ uint32_t cpuid_xlevel;
+ uint32_t cpuid_xlevel2;
uint32_t cpuid_vendor1;
uint32_t cpuid_vendor2;
uint32_t cpuid_vendor3;
uint32_t cpuid_version;
- uint32_t cpuid_features;
- uint32_t cpuid_ext_features;
- uint32_t cpuid_xlevel;
+ FeatureWordArray features;
uint32_t cpuid_model[12];
- uint32_t cpuid_ext2_features;
- uint32_t cpuid_ext3_features;
uint32_t cpuid_apic_id;
- /* Store the results of Centaur's CPUID instructions */
- uint32_t cpuid_xlevel2;
- uint32_t cpuid_ext4_features;
- /* Flags from CPUID[EAX=7,ECX=0].EBX */
- uint32_t cpuid_7_0_ebx_features;
/* MTRRs */
uint64_t mtrr_fixed[11];
uint8_t soft_interrupt;
uint8_t has_error_code;
uint32_t sipi_vector;
- uint32_t cpuid_kvm_features;
- uint32_t cpuid_svm_features;
bool tsc_valid;
int tsc_khz;
void *kvm_xsave_buf;
#include "cpu-qom.h"
X86CPU *cpu_x86_init(const char *cpu_model);
+X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
+ Error **errp);
int cpu_x86_exec(CPUX86State *s);
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf);
void x86_cpudef_setup(void);
static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
int sipi_vector)
{
+ CPUState *cs = CPU(cpu);
CPUX86State *env = &cpu->env;
env->eip = 0;
sipi_vector << 12,
env->segs[R_CS].limit,
env->segs[R_CS].flags);
- env->halted = 0;
+ cs->halted = 0;
}
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list x86_cpu_list
-#define cpudef_setup x86_cpudef_setup
-
-#define CPU_SAVE_VERSION 12
+#define cpudef_setup x86_cpudef_setup
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
? MMU_KSMAP_IDX : MMU_KERNEL_IDX;
}
-#undef EAX
-#define EAX (env->regs[R_EAX])
-#undef ECX
-#define ECX (env->regs[R_ECX])
-#undef EDX
-#define EDX (env->regs[R_EDX])
-#undef EBX
-#define EBX (env->regs[R_EBX])
-#undef ESP
-#define ESP (env->regs[R_ESP])
-#undef EBP
-#define EBP (env->regs[R_EBP])
-#undef ESI
-#define ESI (env->regs[R_ESI])
-#undef EDI
-#define EDI (env->regs[R_EDI])
-#undef EIP
-#define EIP (env->eip)
-#define DF (env->df)
-
-#define CC_SRC (env->cc_src)
-#define CC_DST (env->cc_dst)
-#define CC_OP (env->cc_op)
+#define CC_DST  (env->cc_dst)
+#define CC_SRC  (env->cc_src)
+#define CC_SRC2 (env->cc_src2)
+#define CC_OP   (env->cc_op)
/* n must be a constant to be efficient */
static inline target_long lshift(target_long x, int n)
/* translate.c */
void optimize_flags_init(void);
-#if defined(CONFIG_USER_ONLY)
-static inline void cpu_clone_regs(CPUX86State *env, target_ulong newsp)
-{
- if (newsp)
- env->regs[R_ESP] = newsp;
- env->regs[R_EAX] = 0;
-}
-#endif
-
#include "exec/cpu-all.h"
#include "svm.h"
#if !defined(CONFIG_USER_ONLY)
-#include "hw/apic.h"
+#include "hw/i386/apic.h"
#endif
-static inline bool cpu_has_work(CPUState *cpu)
+static inline bool cpu_has_work(CPUState *cs)
{
- CPUX86State *env = &X86_CPU(cpu)->env;
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
- return ((env->interrupt_request & (CPU_INTERRUPT_HARD |
- CPU_INTERRUPT_POLL)) &&
+ return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
+ CPU_INTERRUPT_POLL)) &&
(env->eflags & IF_MASK)) ||
- (env->interrupt_request & (CPU_INTERRUPT_NMI |
- CPU_INTERRUPT_INIT |
- CPU_INTERRUPT_SIPI |
- CPU_INTERRUPT_MCE));
+ (cs->interrupt_request & (CPU_INTERRUPT_NMI |
+ CPU_INTERRUPT_INIT |
+ CPU_INTERRUPT_SIPI |
+ CPU_INTERRUPT_MCE));
}
#include "exec/exec-all.h"
-static inline void cpu_pc_from_tb(CPUX86State *env, TranslationBlock *tb)
-{
- env->eip = tb->pc - tb->cs_base;
-}
-
static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
{
static inline uint32_t cpu_compute_eflags(CPUX86State *env)
{
- return env->eflags | cpu_cc_compute_all(env, CC_OP) | (DF & DF_MASK);
+ return env->eflags | cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
}
/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */
int update_mask)
{
CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
- DF = 1 - (2 * ((eflags >> 10) & 1));
+ env->df = 1 - (2 * ((eflags >> 10) & 1));
env->eflags = (env->eflags & ~update_mask) |
(eflags & update_mask) | 0x2;
}
uint64_t param);
void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1);
-/* op_helper.c */
-void do_interrupt(CPUX86State *env);
+/* seg_helper.c */
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
-void do_smm_enter(CPUX86State *env1);
+void do_smm_enter(X86CPU *cpu);
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
void disable_kvm_pv_eoi(void);
+void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
+ uint32_t feat_add, uint32_t feat_remove);
+
+
/* Return name of 32-bit register, from a R_* constant */
const char *get_register_name_32(unsigned int reg);
uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index);
void enable_compat_apic_id_mode(void);
+#define APIC_DEFAULT_ADDRESS 0xfee00000
+#define APIC_SPACE_SIZE 0x100000
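+/* i.e. a 1 MiB region starting at APIC_DEFAULT_ADDRESS (0xfee00000) */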
+
#endif /* CPU_I386_H */