};
static const char *cpuid_7_0_ebx_feature_name[] = {
- "fsgsbase", NULL, NULL, "bmi1", "hle", "avx2", NULL, "smep",
- "bmi2", "erms", "invpcid", "rtm", NULL, NULL, NULL, NULL,
- NULL, NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
+ "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
+ "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
+ NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
};
static const char *cpuid_apm_edx_feature_name[] = {
.offset = 0x3c0, .size = 0x40 },
[4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
.offset = 0x400, .size = 0x40 },
+ [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+ .offset = 0x440, .size = 0x40 },  /* AVX-512 opmask state (k0-k7) */
+ [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+ .offset = 0x480, .size = 0x200 }, /* AVX-512 upper 256 bits of ZMM0-15 */
+ [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+ .offset = 0x680, .size = 0x400 }, /* AVX-512 ZMM16-31 state */
};
const char *get_register_name_32(unsigned int reg)
return x86_reg_info_32[reg].name;
}
-/* collects per-function cpuid data
- */
-typedef struct model_features_t {
- uint32_t *guest_feat;
- uint32_t *host_feat;
- FeatureWord feat_word;
-} model_features_t;
-
/* KVM-specific features that are automatically added to all CPU models
* when KVM is enabled.
*/
/* Features that are not added by default to any CPU model when KVM is enabled.
*/
static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
+ [FEAT_1_EDX] = CPUID_ACPI,
[FEAT_1_ECX] = CPUID_EXT_MONITOR,
};
-void x86_cpu_compat_disable_kvm_features(FeatureWord w, uint32_t features)
+void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
{
kvm_default_features[w] &= ~features;
}
}
static void add_flagname_to_bitmaps(const char *flagname,
- FeatureWordArray words)
+ FeatureWordArray words,
+ Error **errp)
{
FeatureWord w;
for (w = 0; w < FEATURE_WORDS; w++) {
}
}
if (w == FEATURE_WORDS) {
- fprintf(stderr, "CPU feature %s not found\n", flagname);
+ error_setg(errp, "CPU feature %s not found", flagname);
}
}
.family = 16,
.model = 2,
.stepping = 3,
+ /* Missing: CPUID_HT */
.features[FEAT_1_EDX] =
PPRO_FEATURES |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
- CPUID_PSE36 | CPUID_VME | CPUID_HT,
+ CPUID_PSE36 | CPUID_VME,
.features[FEAT_1_ECX] =
CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
CPUID_EXT_POPCNT,
.features[FEAT_8000_0001_ECX] =
CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
+ /* Missing: CPUID_SVM_LBRV */
.features[FEAT_SVM] =
- CPUID_SVM_NPT | CPUID_SVM_LBRV,
+ CPUID_SVM_NPT,
.xlevel = 0x8000001A,
.model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
},
.family = 6,
.model = 15,
.stepping = 11,
+ /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
.features[FEAT_1_EDX] =
PPRO_FEATURES |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
- CPUID_PSE36 | CPUID_VME | CPUID_DTS | CPUID_ACPI | CPUID_SS |
- CPUID_HT | CPUID_TM | CPUID_PBE,
+ CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
+ /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
+ * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
.features[FEAT_1_ECX] =
CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
- CPUID_EXT_DTES64 | CPUID_EXT_DSCPL | CPUID_EXT_VMX | CPUID_EXT_EST |
- CPUID_EXT_TM2 | CPUID_EXT_CX16 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
+ CPUID_EXT_VMX | CPUID_EXT_CX16,
.features[FEAT_8000_0001_EDX] =
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
.features[FEAT_8000_0001_ECX] =
.family = 6,
.model = 14,
.stepping = 8,
+ /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
.features[FEAT_1_EDX] =
PPRO_FEATURES | CPUID_VME |
- CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_DTS | CPUID_ACPI |
- CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
+ CPUID_SS,
+ /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
+ * CPUID_EXT_PDCM */
.features[FEAT_1_ECX] =
- CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_VMX |
- CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
+ CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_VMX,
.features[FEAT_8000_0001_EDX] =
CPUID_EXT2_NX,
.xlevel = 0x80000008,
.family = 6,
.model = 28,
.stepping = 2,
+ /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
.features[FEAT_1_EDX] =
PPRO_FEATURES |
- CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | CPUID_DTS |
- CPUID_ACPI | CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
+ CPUID_ACPI | CPUID_SS,
/* Some CPUs got no CPUID_SEP */
+ /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
+ * CPUID_EXT_XTPR */
.features[FEAT_1_ECX] =
CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
- CPUID_EXT_DSCPL | CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR |
CPUID_EXT_MOVBE,
.features[FEAT_8000_0001_EDX] =
(PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
}
}
+static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
+ bool migratable_only);
+
#ifdef CONFIG_KVM
static int cpu_x86_fill_model_id(char *str)
dc->props = host_x86_cpu_properties;
}
-static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
- bool migratable_only);
-
static void host_x86_cpu_initfn(Object *obj)
{
X86CPU *cpu = X86_CPU(obj);
CPUX86State *env = &cpu->env;
KVMState *s = kvm_state;
- FeatureWord w;
assert(kvm_enabled());
+ /* We can't fill the features array here because we don't know yet if
+ * "migratable" is true or false.
+ */
+ cpu->host_features = true;
+
env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
- for (w = 0; w < FEATURE_WORDS; w++) {
- env->features[w] =
- x86_cpu_get_supported_feature_word(w, cpu->migratable);
- }
object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
if (value < min || value > max) {
error_setg(errp, "Property %s.%s doesn't take value %" PRId64
- " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
- object_get_typename(obj), name ? name : "null",
- value, min, max);
+ " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
+ object_get_typename(obj), name ? name : "null",
+ value, min, max);
return;
}
cpu->hyperv_spinlock_attempts = value;
while (featurestr) {
char *val;
if (featurestr[0] == '+') {
- add_flagname_to_bitmaps(featurestr + 1, plus_features);
+ add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
} else if (featurestr[0] == '-') {
- add_flagname_to_bitmaps(featurestr + 1, minus_features);
+ add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
} else if ((val = strchr(featurestr, '='))) {
*val = 0; val++;
feat2prop(featurestr);
}
if (numvalue < min) {
error_report("hv-spinlocks value shall always be >= 0x%x"
- ", fixup will be removed in future versions",
- min);
+ ", fixup will be removed in future versions",
+ min);
numvalue = min;
}
snprintf(num, sizeof(num), "%" PRId32, numvalue);
featurestr = strtok(NULL, ",");
}
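+ /* For -cpu host the feature words could not be filled at initfn time (see
+ * the comment in host_x86_cpu_initfn); do it here, now that "migratable"
+ * is known, seeding them from what the host/KVM reports as supported
+ * before the explicit +/- feature flags are applied below. */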
+ if (cpu->host_features) {
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ env->features[w] =
+ x86_cpu_get_supported_feature_word(w, cpu->migratable);
+ }
+ }
+
for (w = 0; w < FEATURE_WORDS; w++) {
env->features[w] |= plus_features[w];
env->features[w] &= ~minus_features[w];
for (i = 0; i < 8; i++) {
env->fptags[i] = 1;
}
- env->fpuc = 0x37f;
+ cpu_set_fpuc(env, 0x37f);
env->mxcsr = 0x1f80;
env->xstate_bv = XSTATE_FP | XSTATE_SSE;
env->xcr0 = 1;
+ /*
+ * SDM 11.11.5 requires:
+ * - IA32_MTRR_DEF_TYPE MSR.E = 0
+ * - IA32_MTRR_PHYSMASKn.V = 0
+ * All other bits are undefined. For simplicity, zero it all.
+ */
+ env->mtrr_deftype = 0;
+ memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
+ memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
+
#if !defined(CONFIG_USER_ONLY)
/* We hard-wire the BSP to the first CPU. */
if (s->cpu_index == 0) {
}
#endif
+
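+/* cpuid_vendor1/2/3 hold the EBX, EDX and ECX words returned by CPUID
+ * leaf 0, i.e. the 12-byte "GenuineIntel" / "AuthenticAMD" vendor string. */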
+#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
+#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
CPUX86State *env = &cpu->env;
Error *local_err = NULL;
+ static bool ht_warned;
if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
env->cpuid_level = 7;
/* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
* CPUID[1].EDX.
*/
- if (env->cpuid_vendor1 == CPUID_VENDOR_AMD_1 &&
- env->cpuid_vendor2 == CPUID_VENDOR_AMD_2 &&
- env->cpuid_vendor3 == CPUID_VENDOR_AMD_3) {
+ if (IS_AMD_CPU(env)) {
env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
& CPUID_EXT2_AMD_ALIASES);
mce_init(cpu);
qemu_init_vcpu(cs);
+ /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
+ * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
+ * based on inputs (sockets, cores, threads), it is still better to give
+ * users a warning.
+ *
+ * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
+ * cs->nr_threads hasn't been populated yet and the check is incorrect.
+ */
+ if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
+ error_report("AMD CPU doesn't support hyperthreading. Please configure"
+ " -smp options properly.");
+ ht_warned = true;
+ }
+
x86_cpu_apic_realize(cpu, &local_err);
if (local_err != NULL) {
goto out;
if (tcg_enabled() && !inited) {
inited = 1;
optimize_flags_init();
-#ifndef CONFIG_USER_ONLY
- cpu_set_debug_excp_handler(breakpoint_handler);
-#endif
}
}
cc->parse_features = x86_cpu_parse_featurestr;
cc->has_work = x86_cpu_has_work;
cc->do_interrupt = x86_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
cc->dump_state = x86_cpu_dump_state;
cc->set_pc = x86_cpu_set_pc;
cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
cc->vmsd = &vmstate_x86_cpu;
#endif
cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
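+ /* Register the debug exception handler through the CPUClass hook; this
+ * replaces the global cpu_set_debug_excp_handler() call removed above. */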
+#ifndef CONFIG_USER_ONLY
+ cc->debug_excp_handler = breakpoint_handler;
+#endif
+ cc->cpu_exec_enter = x86_cpu_exec_enter;
+ cc->cpu_exec_exit = x86_cpu_exec_exit;
}
static const TypeInfo x86_cpu_type_info = {