#define MSR_IA32_APICBASE 0x1b
#define MSR_IA32_APICBASE_BSP (1<<8)
#define MSR_IA32_APICBASE_ENABLE (1<<11)
+#define MSR_IA32_APICBASE_EXTD (1 << 10)
#define MSR_IA32_APICBASE_BASE (0xfffffU<<12)
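/* Illustrative sketch only, not part of this header: one way the APIC base
 * MSR bits above could be combined to test for x2APIC mode (EXTD together
 * with ENABLE). The helper name is hypothetical.
 */
static inline bool example_apicbase_x2apic(uint64_t apicbase)
{
    return (apicbase & MSR_IA32_APICBASE_ENABLE) &&
           (apicbase & MSR_IA32_APICBASE_EXTD);
}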
#define MSR_IA32_FEATURE_CONTROL 0x0000003a
#define MSR_TSC_ADJUST 0x0000003b
FEAT_SVM, /* CPUID[8000_000A].EDX */
FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */
FEAT_6_EAX, /* CPUID[6].EAX */
+ FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
+ FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
FEATURE_WORDS,
} FeatureWord;
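/* Illustrative sketch only (helper name hypothetical): the supported XSAVE
 * component mask reported by CPUID[EAX=0xd,ECX=0] is split across the two
 * feature words added above; combining them back into one 64-bit mask:
 */
static inline uint64_t example_xsave_component_mask(uint32_t comp_lo,
                                                    uint32_t comp_hi)
{
    return ((uint64_t)comp_hi << 32) | comp_lo;
}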
#define CPUID_7_0_EBX_RTM (1U << 11)
#define CPUID_7_0_EBX_MPX (1U << 14)
#define CPUID_7_0_EBX_AVX512F (1U << 16) /* AVX-512 Foundation */
+#define CPUID_7_0_EBX_AVX512DQ (1U << 17) /* AVX-512 Doubleword & Quadword Instrs */
#define CPUID_7_0_EBX_RDSEED (1U << 18)
#define CPUID_7_0_EBX_ADX (1U << 19)
#define CPUID_7_0_EBX_SMAP (1U << 20)
+#define CPUID_7_0_EBX_AVX512IFMA (1U << 21) /* AVX-512 Integer Fused Multiply Add */
#define CPUID_7_0_EBX_PCOMMIT (1U << 22) /* Persistent Commit */
#define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) /* Flush a Cache Line Optimized */
#define CPUID_7_0_EBX_CLWB (1U << 24) /* Cache Line Write Back */
#define CPUID_7_0_EBX_AVX512PF (1U << 26) /* AVX-512 Prefetch */
#define CPUID_7_0_EBX_AVX512ER (1U << 27) /* AVX-512 Exponential and Reciprocal */
#define CPUID_7_0_EBX_AVX512CD (1U << 28) /* AVX-512 Conflict Detection */
+#define CPUID_7_0_EBX_AVX512BW (1U << 30) /* AVX-512 Byte and Word Instructions */
+#define CPUID_7_0_EBX_AVX512VL (1U << 31) /* AVX-512 Vector Length Extensions */
+#define CPUID_7_0_ECX_VBMI (1U << 1) /* AVX-512 Vector Byte Manipulation Instrs */
+#define CPUID_7_0_ECX_UMIP (1U << 2)
#define CPUID_7_0_ECX_PKU (1U << 3)
#define CPUID_7_0_ECX_OSPKE (1U << 4)
+#define CPUID_7_0_ECX_RDPID (1U << 22)
#define CPUID_XSAVE_XSAVEOPT (1U << 0)
#define CPUID_XSAVE_XSAVEC (1U << 1)
/* Use a clearer name for this. */
#define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET
+/* Instead of computing the condition codes after each x86 instruction,
+ * QEMU just stores one operand (called CC_SRC), the result
+ * (called CC_DST) and the type of operation (called CC_OP). When the
+ * condition codes are needed, the condition codes can be calculated
+ * using this information. Condition codes are not generated if they
+ * are only needed for conditional branches.
+ */
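/* Illustrative sketch only, not part of this header: assuming a logic-type
 * CC_OP where CC_DST holds the operation result, the zero flag can be
 * recovered on demand from CC_DST instead of being updated after every
 * instruction. The helper name is hypothetical; ZF is bit 6 of EFLAGS.
 */
static inline int example_lazy_zf(uint64_t cc_dst)
{
    return cc_dst == 0 ? (1 << 6) : 0;
}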
typedef enum {
CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */
#define NB_OPMASK_REGS 8
+/* A CPU can't have an APIC ID of 0xFFFFFFFF, so use that value to indicate
+ * that the APIC ID hasn't been set yet.
+ */
+#define UNASSIGNED_APIC_ID 0xFFFFFFFF
+
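/* Illustrative sketch only (function and parameter names hypothetical):
 * a caller can use the sentinel above to decide whether an APIC ID still
 * needs to be auto-assigned.
 */
static inline uint32_t example_resolve_apic_id(uint32_t configured_id,
                                               uint32_t default_id)
{
    return configured_id == UNASSIGNED_APIC_ID ? default_id : configured_id;
}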
typedef union X86LegacyXSaveArea {
struct {
uint16_t fcw;
typedef struct X86XSaveHeader {
uint64_t xstate_bv;
uint64_t xcomp_bv;
- uint8_t reserved[48];
+ uint64_t reserve0;
+ uint8_t reserved[40];
} X86XSaveHeader;
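/* Illustrative consistency check, not part of the original change: the XSAVE
 * header is architecturally 64 bytes, so xstate_bv + xcomp_bv + reserve0 +
 * reserved[40] (8 + 8 + 8 + 40) must still add up to 64. Assumes
 * QEMU_BUILD_BUG_ON is available at this point in the header.
 */
QEMU_BUILD_BUG_ON(sizeof(X86XSaveHeader) != 64);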
/* Ext. save area 2: AVX State */
uint64_t tsc;
uint64_t tsc_adjust;
uint64_t tsc_deadline;
+ uint64_t tsc_aux;
+
+ uint64_t xcr0;
uint64_t mcg_status;
uint64_t msr_ia32_misc_enable;
uint64_t pat;
uint32_t smbase;
+ uint32_t pkru;
+
/* End of state preserved by INIT (dummy marker). */
struct {} end_init_save;
CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
+ struct {} end_reset_fields;
/* processor features (e.g. for CPUID insn) */
- uint32_t cpuid_level;
- uint32_t cpuid_xlevel;
- uint32_t cpuid_xlevel2;
+ /* Minimum level/xlevel/xlevel2, based on CPU model + features */
+ uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2;
+ /* Maximum level/xlevel/xlevel2 value for auto-assignment: */
+ uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2;
+ /* Actual level/xlevel/xlevel2 value (see the sketch after this struct): */
+ uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2;
uint32_t cpuid_vendor1;
uint32_t cpuid_vendor2;
uint32_t cpuid_vendor3;
uint64_t mcg_ctl;
uint64_t mcg_ext_ctl;
uint64_t mce_banks[MCE_BANKS_DEF*4];
-
- uint64_t tsc_aux;
+ uint64_t xstate_bv;
/* vmstate */
uint16_t fpus_vmstate;
uint16_t fptag_vmstate;
uint16_t fpregs_format_vmstate;
- uint64_t xstate_bv;
- uint64_t xcr0;
uint64_t xss;
- uint32_t pkru;
-
TPRAccess tpr_access_type;
} CPUX86State;
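/* Illustrative sketch only (helper name hypothetical): one plausible way the
 * "actual" cpuid_level/xlevel/xlevel2 values above could be derived from the
 * corresponding minimum (required by the CPU model + features) and maximum
 * (upper bound for auto-assignment) values when auto level-increase is
 * enabled.
 */
static inline uint32_t example_effective_cpuid_level(uint32_t min_level,
                                                     uint32_t max_level,
                                                     uint32_t requested)
{
    uint32_t level = requested < min_level ? min_level : requested;

    return level > max_level ? max_level : level;
}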
bool expose_kvm;
bool migratable;
bool host_features;
- int64_t apic_id;
+ uint32_t apic_id;
/* if true, the CPUID code directly forwards host cache leaves to the guest */
bool cache_info_passthrough;
*/
bool enable_lmce;
+ /* Compatibility bits for old machine types.
+ * If true, a virtual L3 cache is presented to the VM; the vCPUs in the
+ * same virtual socket share that virtual L3 cache.
+ */
+ bool enable_l3_cache;
+
/* Compatibility bits for old machine types: */
bool enable_cpuid_0xb;
+ /* Enable auto level-increase for all CPUID leaves */
+ bool full_cpuid_auto_level;
+
+ /* if true, fill the top bits of the MTRR_PHYSMASKn variable range
+ * (see the illustrative sketch after this struct) */
+ bool fill_mtrr_mask;
+
+ /* if true, override the phys_bits value with a value read from the host */
+ bool host_phys_bits;
+
+ /* Number of physical address bits supported */
+ uint32_t phys_bits;
+
/* in order to simplify APIC support, we leave this pointer to the
user */
struct DeviceState *apic_state;
Notifier machine_done;
struct kvm_msrs *kvm_msr_buf;
+
+ int32_t socket_id;
+ int32_t core_id;
+ int32_t thread_id;
};
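/* Illustrative sketch only (helper name hypothetical): filling the top bits
 * of an MTRR PHYSMASKn value above the guest's phys_bits, up to the
 * architectural 52-bit physical address limit, as described by the
 * fill_mtrr_mask comment above.
 */
static inline uint64_t example_fill_mtrr_physmask(uint64_t mask,
                                                  uint32_t phys_bits)
{
    uint64_t high_bits = ~((1ULL << phys_bits) - 1) & ((1ULL << 52) - 1);

    return mask | high_bits;
}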
static inline X86CPU *x86_env_get_cpu(CPUX86State *env)
void *puc);
/* cpu.c */
-typedef struct ExtSaveArea {
- uint32_t feature, bits;
- uint32_t offset, size;
-} ExtSaveArea;
-
-extern const ExtSaveArea x86_ext_save_areas[];
-
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx);
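/* Illustrative usage sketch only (helper name hypothetical): reading the
 * vendor string through the declaration above. For leaf 0 the vendor is
 * returned in EBX, EDX, ECX, in that order.
 */
static inline void example_read_vendor(CPUX86State *env, uint32_t vendor[3])
{
    uint32_t eax;

    cpu_x86_cpuid(env, 0, 0, &eax, &vendor[0], &vendor[2], &vendor[1]);
}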