#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B 0x7d
+#define CPUID_2_L3_16MB_16WAY_64B 0x4d
/* CPUID Leaf 4 constants: */
#define L2_LINES_PER_TAG 1
#define L2_SIZE_KB_AMD 512
-/* No L3 cache: */
+/* Level 3 unified cache: */
#define L3_SIZE_KB 0 /* disabled */
#define L3_ASSOCIATIVITY 0 /* disabled */
#define L3_LINES_PER_TAG 0 /* disabled */
#define L3_LINE_SIZE 0 /* disabled */
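+/* L3 cache geometry used when the l3-cache property is enabled:
+ * 16 MB, 16-way set associative, 64-byte lines (16384 sets). */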
+#define L3_N_LINE_SIZE 64
+#define L3_N_ASSOCIATIVITY 16
+#define L3_N_SETS 16384
+#define L3_N_PARTITIONS 1
+#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
+#define L3_N_LINES_PER_TAG 1
+#define L3_N_SIZE_KB_AMD 16384
/* TLB definitions: */
dst[CPUID_VENDOR_SZ] = '\0';
}
-/* feature flags taken from "Intel Processor Identification and the CPUID
- * Instruction" and AMD's "CPUID Specification". In cases of disagreement
- * between feature naming conventions, aliases may be added.
- */
-static const char *feature_name[] = {
- "fpu", "vme", "de", "pse",
- "tsc", "msr", "pae", "mce",
- "cx8", "apic", NULL, "sep",
- "mtrr", "pge", "mca", "cmov",
- "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
- NULL, "ds" /* Intel dts */, "acpi", "mmx",
- "fxsr", "sse", "sse2", "ss",
- "ht" /* Intel htt */, "tm", "ia64", "pbe",
-};
-static const char *ext_feature_name[] = {
- "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
- "ds_cpl", "vmx", "smx", "est",
- "tm2", "ssse3", "cid", NULL,
- "fma", "cx16", "xtpr", "pdcm",
- NULL, "pcid", "dca", "sse4.1|sse4_1",
- "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
- "tsc-deadline", "aes", "xsave", "osxsave",
- "avx", "f16c", "rdrand", "hypervisor",
-};
-/* Feature names that are already defined on feature_name[] but are set on
- * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
- * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
- * if and only if CPU vendor is AMD.
- */
-static const char *ext2_feature_name[] = {
- NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
- NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
- NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
- NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
- NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
- "nx|xd", NULL, "mmxext", NULL /* mmx */,
- NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
- NULL, "lm|i64", "3dnowext", "3dnow",
-};
-static const char *ext3_feature_name[] = {
- "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
- "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
- "3dnowprefetch", "osvw", "ibs", "xop",
- "skinit", "wdt", NULL, "lwp",
- "fma4", "tce", NULL, "nodeid_msr",
- NULL, "tbm", "topoext", "perfctr_core",
- "perfctr_nb", NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
-static const char *ext4_feature_name[] = {
- NULL, NULL, "xstore", "xstore-en",
- NULL, NULL, "xcrypt", "xcrypt-en",
- "ace2", "ace2-en", "phe", "phe-en",
- "pmm", "pmm-en", NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
-static const char *kvm_feature_name[] = {
- "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
- "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- "kvmclock-stable-bit", NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
-static const char *svm_feature_name[] = {
- "npt", "lbrv", "svm_lock", "nrip_save",
- "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
- NULL, NULL, "pause_filter", NULL,
- "pfthreshold", NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
-static const char *cpuid_7_0_ebx_feature_name[] = {
- "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
- "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
- "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
- "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
-};
-
-static const char *cpuid_7_0_ecx_feature_name[] = {
- NULL, NULL, NULL, "pku",
- "ospke", NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
-static const char *cpuid_apm_edx_feature_name[] = {
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- "invtsc", NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
-static const char *cpuid_xsave_feature_name[] = {
- "xsaveopt", "xsavec", "xgetbv1", "xsaves",
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
-static const char *cpuid_6_feature_name[] = {
- NULL, NULL, "arat", NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
typedef struct FeatureWordInfo {
- const char **feat_names;
+ /* Feature flag names are taken from "Intel Processor Identification and
+ * the CPUID Instruction" and AMD's "CPUID Specification".
+ * In cases of disagreement between feature naming conventions,
+ * aliases may be added.
+ */
+ const char *feat_names[32];
uint32_t cpuid_eax; /* Input EAX for CPUID */
bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
uint32_t cpuid_ecx; /* Input ECX value for CPUID */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
[FEAT_1_EDX] = {
- .feat_names = feature_name,
+ .feat_names = {
+ "fpu", "vme", "de", "pse",
+ "tsc", "msr", "pae", "mce",
+ "cx8", "apic", NULL, "sep",
+ "mtrr", "pge", "mca", "cmov",
+ "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
+ NULL, "ds" /* Intel dts */, "acpi", "mmx",
+ "fxsr", "sse", "sse2", "ss",
+ "ht" /* Intel htt */, "tm", "ia64", "pbe",
+ },
.cpuid_eax = 1, .cpuid_reg = R_EDX,
.tcg_features = TCG_FEATURES,
},
[FEAT_1_ECX] = {
- .feat_names = ext_feature_name,
+ .feat_names = {
+ "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
+ "ds_cpl", "vmx", "smx", "est",
+ "tm2", "ssse3", "cid", NULL,
+ "fma", "cx16", "xtpr", "pdcm",
+ NULL, "pcid", "dca", "sse4.1|sse4_1",
+ "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
+ "tsc-deadline", "aes", "xsave", "osxsave",
+ "avx", "f16c", "rdrand", "hypervisor",
+ },
.cpuid_eax = 1, .cpuid_reg = R_ECX,
.tcg_features = TCG_EXT_FEATURES,
},
+ /* Feature names that are already listed for FEAT_1_EDX above, but are
+ * also set in CPUID[8000_0001].EDX on AMD CPUs, don't have their names
+ * repeated in feat_names below. They are copied automatically to
+ * features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
+ */
[FEAT_8000_0001_EDX] = {
- .feat_names = ext2_feature_name,
+ .feat_names = {
+ NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
+ NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
+ NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
+ NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
+ NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
+ "nx|xd", NULL, "mmxext", NULL /* mmx */,
+ NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb", "rdtscp",
+ NULL, "lm|i64", "3dnowext", "3dnow",
+ },
.cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
.tcg_features = TCG_EXT2_FEATURES,
},
[FEAT_8000_0001_ECX] = {
- .feat_names = ext3_feature_name,
+ .feat_names = {
+ "lahf_lm", "cmp_legacy", "svm", "extapic",
+ "cr8legacy", "abm", "sse4a", "misalignsse",
+ "3dnowprefetch", "osvw", "ibs", "xop",
+ "skinit", "wdt", NULL, "lwp",
+ "fma4", "tce", NULL, "nodeid_msr",
+ NULL, "tbm", "topoext", "perfctr_core",
+ "perfctr_nb", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
.tcg_features = TCG_EXT3_FEATURES,
},
[FEAT_C000_0001_EDX] = {
- .feat_names = ext4_feature_name,
+ .feat_names = {
+ NULL, NULL, "xstore", "xstore-en",
+ NULL, NULL, "xcrypt", "xcrypt-en",
+ "ace2", "ace2-en", "phe", "phe-en",
+ "pmm", "pmm-en", NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
.tcg_features = TCG_EXT4_FEATURES,
},
[FEAT_KVM] = {
- .feat_names = kvm_feature_name,
+ .feat_names = {
+ "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
+ "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ "kvmclock-stable-bit", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
.tcg_features = TCG_KVM_FEATURES,
},
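+ /* Hyper-V feature words, reported via CPUID 0x40000003. All bit names
+ * are NULL placeholders, so no per-bit properties are registered for
+ * these words.
+ */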
+ [FEAT_HYPERV_EAX] = {
+ .feat_names = {
+ NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
+ NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
+ NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
+ NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
+ NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
+ NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
+ },
+ [FEAT_HYPERV_EBX] = {
+ .feat_names = {
+ NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
+ NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
+ NULL /* hv_post_messages */, NULL /* hv_signal_events */,
+ NULL /* hv_create_port */, NULL /* hv_connect_port */,
+ NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
+ NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
+ NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
+ },
+ [FEAT_HYPERV_EDX] = {
+ .feat_names = {
+ NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
+ NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
+ NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
+ NULL, NULL,
+ NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
+ },
[FEAT_SVM] = {
- .feat_names = svm_feature_name,
+ .feat_names = {
+ "npt", "lbrv", "svm_lock", "nrip_save",
+ "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
+ NULL, NULL, "pause_filter", NULL,
+ "pfthreshold", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
.tcg_features = TCG_SVM_FEATURES,
},
[FEAT_7_0_EBX] = {
- .feat_names = cpuid_7_0_ebx_feature_name,
+ .feat_names = {
+ "fsgsbase", "tsc_adjust", NULL, "bmi1",
+ "hle", "avx2", NULL, "smep",
+ "bmi2", "erms", "invpcid", "rtm",
+ NULL, NULL, "mpx", NULL,
+ "avx512f", "avx512dq", "rdseed", "adx",
+ "smap", "avx512ifma", "pcommit", "clflushopt",
+ "clwb", NULL, "avx512pf", "avx512er",
+ "avx512cd", NULL, "avx512bw", "avx512vl",
+ },
.cpuid_eax = 7,
.cpuid_needs_ecx = true, .cpuid_ecx = 0,
.cpuid_reg = R_EBX,
.tcg_features = TCG_7_0_EBX_FEATURES,
},
[FEAT_7_0_ECX] = {
- .feat_names = cpuid_7_0_ecx_feature_name,
+ .feat_names = {
+ NULL, "avx512vbmi", "umip", "pku",
+ "ospke", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, "rdpid", NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = 7,
.cpuid_needs_ecx = true, .cpuid_ecx = 0,
.cpuid_reg = R_ECX,
.tcg_features = TCG_7_0_ECX_FEATURES,
},
[FEAT_8000_0007_EDX] = {
- .feat_names = cpuid_apm_edx_feature_name,
+ .feat_names = {
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ "invtsc", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = 0x80000007,
.cpuid_reg = R_EDX,
.tcg_features = TCG_APM_FEATURES,
.unmigratable_flags = CPUID_APM_INVTSC,
},
[FEAT_XSAVE] = {
- .feat_names = cpuid_xsave_feature_name,
+ .feat_names = {
+ "xsaveopt", "xsavec", "xgetbv1", "xsaves",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = 0xd,
.cpuid_needs_ecx = true, .cpuid_ecx = 1,
.cpuid_reg = R_EAX,
.tcg_features = TCG_XSAVE_FEATURES,
},
[FEAT_6_EAX] = {
- .feat_names = cpuid_6_feature_name,
+ .feat_names = {
+ NULL, NULL, "arat", NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = 6, .cpuid_reg = R_EAX,
.tcg_features = TCG_6_EAX_FEATURES,
},
};
#undef REGISTER
-const ExtSaveArea x86_ext_save_areas[] = {
+typedef struct ExtSaveArea {
+ uint32_t feature, bits;
+ uint32_t offset, size;
+} ExtSaveArea;
+
+static const ExtSaveArea x86_ext_save_areas[] = {
[XSTATE_YMM_BIT] =
{ .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
.offset = offsetof(X86XSaveArea, avx_state),
.size = sizeof(XSavePKRU) },
};
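+/* Return the size, in bytes, of the XSAVE area needed for the state
+ * components set in @mask (at minimum the legacy FP/SSE region plus
+ * the XSAVE header).
+ */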
+static uint32_t xsave_area_size(uint64_t mask)
+{
+ int i;
+ uint64_t ret = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader);
+
+ for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[i];
+ if ((mask >> i) & 1) {
+ ret = MAX(ret, esa->offset + esa->size);
+ }
+ }
+ return ret;
+}
+
const char *get_register_name_32(unsigned int reg)
{
if (reg >= CPU_NB_REGS32) {
FeatureWord w;
for (w = 0; w < FEATURE_WORDS; w++) {
FeatureWordInfo *wi = &feature_word_info[w];
- if (wi->feat_names &&
- lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
+ if (lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
break;
}
}
const char *name;
uint32_t level;
uint32_t xlevel;
- uint32_t xlevel2;
/* vendor is zero-terminated, 12 character ASCII string */
char vendor[CPUID_VENDOR_SZ + 1];
int family;
#ifdef CONFIG_KVM
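+/* Check whether KVM reports support for Local MCE (MCG_LMCE_P in the
+ * supported MCE capability MSR).
+ */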
+static bool lmce_supported(void)
+{
+ uint64_t mce_cap;
+
+ if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
+ return false;
+ }
+
+ return !!(mce_cap & MCG_LMCE_P);
+}
+
static int cpu_x86_fill_model_id(char *str)
{
uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
/* If KVM is disabled, x86_cpu_realizefn() will report an error later */
if (kvm_enabled()) {
- env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
- env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
- env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
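+ /* KVM's supported levels are recorded only as minimums here; the
+ * effective level/xlevel values are resolved from cpuid_min_* at
+ * realize time.
+ */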
+ env->cpuid_min_level =
+ kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
+ env->cpuid_min_xlevel =
+ kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
+ env->cpuid_min_xlevel2 =
+ kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
+
+ if (lmce_supported()) {
+ object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
+ }
}
object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
-static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
- void *opaque, Error **errp)
-{
- X86CPU *cpu = X86_CPU(obj);
- int64_t value = cpu->apic_id;
-
- visit_type_int(v, name, &value, errp);
-}
-
-static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
- void *opaque, Error **errp)
-{
- X86CPU *cpu = X86_CPU(obj);
- DeviceState *dev = DEVICE(obj);
- const int64_t min = 0;
- const int64_t max = UINT32_MAX;
- Error *error = NULL;
- int64_t value;
-
- if (dev->realized) {
- error_setg(errp, "Attempt to set property '%s' on '%s' after "
- "it was realized", name, object_get_typename(obj));
- return;
- }
-
- visit_type_int(v, name, &value, &error);
- if (error) {
- error_propagate(errp, error);
- return;
- }
- if (value < min || value > max) {
- error_setg(errp, "Property %s.%s doesn't take value %" PRId64
- " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
- object_get_typename(obj), name, value, min, max);
- return;
- }
-
- if ((value != cpu->apic_id) && cpu_exists(value)) {
- error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
- return;
- }
- cpu->apic_id = value;
-}
-
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
const char *name, void *opaque,
/* Parse "+feature,-feature,feature=foo" CPU feature string
*/
-static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
+static void x86_cpu_parse_featurestr(const char *typename, char *features,
Error **errp)
{
- X86CPU *cpu = X86_CPU(cs);
char *featurestr; /* Single 'key=value" string being parsed */
Error *local_err = NULL;
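+ /* The feature string is parsed only once; the settings are registered
+ * as global properties and so apply to every CPU of this type.
+ */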
+ static bool cpu_globals_initialized;
+
+ if (cpu_globals_initialized) {
+ return;
+ }
+ cpu_globals_initialized = true;
if (!features) {
return;
const char *val = NULL;
char *eq = NULL;
char num[32];
+ GlobalProperty *prop;
/* Compatibility syntax: */
if (featurestr[0] == '+') {
name = "tsc-frequency";
}
- object_property_parse(OBJECT(cpu), val, name, &local_err);
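+ /* Register the setting as a global property on the CPU type instead of
+ * setting it directly on a single CPU object.
+ */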
+ prop = g_new0(typeof(*prop), 1);
+ prop->driver = typename;
+ prop->property = g_strdup(name);
+ prop->value = g_strdup(val);
+ prop->errp = &error_fatal;
+ qdev_prop_register_global(prop);
}
if (local_err) {
char host_vendor[CPUID_VENDOR_SZ + 1];
FeatureWord w;
- object_property_set_int(OBJECT(cpu), def->level, "level", errp);
+ /* CPU models only set _minimum_ values for level/xlevel: */
+ object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
+ object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
+
object_property_set_int(OBJECT(cpu), def->family, "family", errp);
object_property_set_int(OBJECT(cpu), def->model, "model", errp);
object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
- object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
- object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
for (w = 0; w < FEATURE_WORDS; w++) {
env->features[w] = def->features[w];
}
-X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
-{
- X86CPU *cpu = NULL;
- ObjectClass *oc;
- gchar **model_pieces;
- char *name, *features;
- Error *error = NULL;
-
- model_pieces = g_strsplit(cpu_model, ",", 2);
- if (!model_pieces[0]) {
- error_setg(&error, "Invalid/empty CPU model name");
- goto out;
- }
- name = model_pieces[0];
- features = model_pieces[1];
-
- oc = x86_cpu_class_by_name(name);
- if (oc == NULL) {
- error_setg(&error, "Unable to find CPU definition: %s", name);
- goto out;
- }
-
- cpu = X86_CPU(object_new(object_class_get_name(oc)));
-
- x86_cpu_parse_featurestr(CPU(cpu), features, &error);
- if (error) {
- goto out;
- }
-
-out:
- if (error != NULL) {
- error_propagate(errp, error);
- if (cpu) {
- object_unref(OBJECT(cpu));
- cpu = NULL;
- }
- }
- g_strfreev(model_pieces);
- return cpu;
-}
-
X86CPU *cpu_x86_init(const char *cpu_model)
{
return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
{
X86CPU *cpu = x86_env_get_cpu(env);
CPUState *cs = CPU(cpu);
+ uint32_t pkg_offset;
/* test if maximum index reached */
if (index & 0x80000000) {
}
*eax = 1; /* Number of CPUID[EAX=2] calls required */
*ebx = 0;
- *ecx = 0;
+ if (!cpu->enable_l3_cache) {
+ *ecx = 0;
+ } else {
+ *ecx = L3_N_DESCRIPTOR;
+ }
*edx = (L1D_DESCRIPTOR << 16) | \
(L1I_DESCRIPTOR << 8) | \
(L2_DESCRIPTOR);
*ecx = L2_SETS - 1;
*edx = CPUID_4_NO_INVD_SHARING;
break;
+ case 3: /* L3 cache info */
+ if (!cpu->enable_l3_cache) {
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ break;
+ }
+ *eax |= CPUID_4_TYPE_UNIFIED | \
+ CPUID_4_LEVEL(3) | \
+ CPUID_4_SELF_INIT_LEVEL;
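+ /* EAX bits 25..14: maximum number of addressable IDs for logical
+ * processors sharing this cache, minus 1 (here the whole package).
+ */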
+ pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
+ *eax |= ((1 << pkg_offset) - 1) << 14;
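+ /* EBX: line size - 1, partitions - 1 (bits 21:12), ways - 1
+ * (bits 31:22); ECX: number of sets - 1.
+ */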
+ *ebx = (L3_N_LINE_SIZE - 1) | \
+ ((L3_N_PARTITIONS - 1) << 12) | \
+ ((L3_N_ASSOCIATIVITY - 1) << 22);
+ *ecx = L3_N_SETS - 1;
+ *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
+ break;
default: /* end of info */
*eax = 0;
*ebx = 0;
switch (count) {
case 0:
- *eax = apicid_core_offset(smp_cores, smp_threads);
- *ebx = smp_threads;
+ *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
+ *ebx = cs->nr_threads;
*ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
break;
case 1:
- *eax = apicid_pkg_offset(smp_cores, smp_threads);
- *ebx = smp_cores * smp_threads;
+ *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
+ *ebx = cs->nr_cores * cs->nr_threads;
*ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
break;
default:
*ebx &= 0xffff; /* The count doesn't need to be reliable. */
break;
case 0xD: {
- KVMState *s = cs->kvm_state;
uint64_t ena_mask;
int i;
if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
break;
}
+
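+ /* Start from the always-present x87 and SSE states, then enable every
+ * state component whose controlling CPUID feature bit is set.
+ */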
+ ena_mask = (XSTATE_FP_MASK | XSTATE_SSE_MASK);
+ for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[i];
+ if (env->features[esa->feature] & esa->bits) {
+ ena_mask |= (1ULL << i);
+ }
+ }
+
if (kvm_enabled()) {
- ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
- ena_mask <<= 32;
- ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
- } else {
- ena_mask = -1;
+ KVMState *s = cs->kvm_state;
+ uint64_t kvm_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
+ kvm_mask <<= 32;
+ kvm_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
+ ena_mask &= kvm_mask;
}
if (count == 0) {
- *ecx = 0x240;
- for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
- const ExtSaveArea *esa = &x86_ext_save_areas[i];
- if ((env->features[esa->feature] & esa->bits) == esa->bits
- && ((ena_mask >> i) & 1) != 0) {
- if (i < 32) {
- *eax |= 1u << i;
- } else {
- *edx |= 1u << (i - 32);
- }
- *ecx = MAX(*ecx, esa->offset + esa->size);
- }
- }
- *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
+ *ecx = xsave_area_size(ena_mask);
+ *eax = ena_mask;
+ *edx = ena_mask >> 32;
*ebx = *ecx;
} else if (count == 1) {
*eax = env->features[FEAT_XSAVE];
} else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
const ExtSaveArea *esa = &x86_ext_save_areas[count];
- if ((env->features[esa->feature] & esa->bits) == esa->bits
- && ((ena_mask >> count) & 1) != 0) {
+ if ((ena_mask >> count) & 1) {
*eax = esa->size;
*ebx = esa->offset;
}
*ecx = (L2_SIZE_KB_AMD << 16) | \
(AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
(L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
- *edx = ((L3_SIZE_KB/512) << 18) | \
- (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
- (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
+ if (!cpu->enable_l3_cache) {
+ *edx = ((L3_SIZE_KB / 512) << 18) | \
+ (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
+ (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
+ } else {
+ *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
+ (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
+ (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
+ }
break;
case 0x80000007:
*eax = 0;
break;
case 0x80000008:
/* virtual & phys address size in low 2 bytes. */
-/* XXX: This value must match the one used in the MMU code. */
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
- /* 64 bit processor */
-/* XXX: The physical address space is limited to 42 bits in exec.c. */
- *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
+ /* 64 bit processor, 48 bits virtual, configurable
+ * physical bits.
+ */
+ *eax = 0x00003000 + cpu->phys_bits;
} else {
- if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
- *eax = 0x00000024; /* 36 bits physical */
- } else {
- *eax = 0x00000020; /* 32 bits physical */
- }
+ *eax = cpu->phys_bits;
}
*ebx = 0;
*ecx = 0;
xcc->parent_reset(s);
- memset(env, 0, offsetof(CPUX86State, cpuid_level));
+ memset(env, 0, offsetof(CPUX86State, end_reset_fields));
tlb_flush(s, 1);
/* init to reset state */
-#ifdef CONFIG_SOFTMMU
- env->hflags |= HF_SOFTMMU_MASK;
-#endif
env->hflags2 |= HF2_GIF_MASK;
cpu_x86_update_cr0(env, 0x60000010);
}
for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
const ExtSaveArea *esa = &x86_ext_save_areas[i];
- if ((env->features[esa->feature] & esa->bits) == esa->bits) {
+ if (env->features[esa->feature] & esa->bits) {
xcr0 |= 1ull << i;
}
}
if (((cenv->cpuid_version >> 8) & 0xf) >= 6
&& (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
(CPUID_MCE | CPUID_MCA)) {
- cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
+ cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
+ (cpu->enable_lmce ? MCG_LMCE_P : 0);
cenv->mcg_ctl = ~(uint64_t)0;
for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
cenv->mce_banks[bank * 4] = ~(uint64_t)0;
cpu->apic_state = DEVICE(object_new(apic_type));
- object_property_add_child(OBJECT(cpu), "apic",
- OBJECT(cpu->apic_state), NULL);
+ object_property_add_child(OBJECT(cpu), "lapic",
+ OBJECT(cpu->apic_state), &error_abort);
+ object_unref(OBJECT(cpu->apic_state));
+
qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
/* TODO: convert to link<> */
apic = APIC_COMMON(cpu->apic_state);
}
#endif
+/* Note: Only safe for use on x86(-64) hosts */
+static uint32_t x86_host_phys_bits(void)
+{
+ uint32_t eax;
+ uint32_t host_phys_bits;
+
+ host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
+ if (eax >= 0x80000008) {
+ host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
+ /* Note: According to AMD doc 25481 rev 2.34, there is a field at
+ * bits 23:16 that can specify the maximum physical address width for
+ * the guest and override this value; but I've not seen anything with
+ * that set.
+ */
+ host_phys_bits = eax & 0xff;
+ } else {
+ /* It's an odd 64-bit machine that doesn't have the leaf for
+ * physical address bits; fall back to 36, which matches most
+ * older Intel parts.
+ */
+ host_phys_bits = 36;
+ }
+
+ return host_phys_bits;
+}
+
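+/* Raise *min to value if it is currently lower */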
+static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
+{
+ if (*min < value) {
+ *min = value;
+ }
+}
+
+/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
+static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
+{
+ CPUX86State *env = &cpu->env;
+ FeatureWordInfo *fi = &feature_word_info[w];
+ uint32_t eax = fi->cpuid_eax;
+ uint32_t region = eax & 0xF0000000;
+
+ if (!env->features[w]) {
+ return;
+ }
+
+ switch (region) {
+ case 0x00000000:
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
+ break;
+ case 0x80000000:
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
+ break;
+ case 0xC0000000:
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
+ break;
+ }
+}
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
(env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
goto out;
}
- if (cpu->apic_id < 0) {
+ if (cpu->apic_id == UNASSIGNED_APIC_ID) {
error_setg(errp, "apic-id property was not initialized properly");
return;
}
cpu->env.features[w] &= ~minus_features[w];
}
- if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
- env->cpuid_level = 7;
+
+ /* CPUID[EAX=7,ECX=0].EBX features have always raised the CPUID level automatically: */
+ x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
+ if (cpu->full_cpuid_auto_level) {
+ x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
+ x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
+ /* SVM requires CPUID[0x8000000A] */
+ if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
+ }
+ }
+
+ /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
+ if (env->cpuid_level == UINT32_MAX) {
+ env->cpuid_level = env->cpuid_min_level;
+ }
+ if (env->cpuid_xlevel == UINT32_MAX) {
+ env->cpuid_xlevel = env->cpuid_min_xlevel;
+ }
+ if (env->cpuid_xlevel2 == UINT32_MAX) {
+ env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
}
if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
& CPUID_EXT2_AMD_ALIASES);
}
+ /* For 64-bit systems, decide how many physical address bits to present.
+ * Ideally this should be the same as the host; anything other than
+ * matching the host can cause incorrect guest behaviour.
+ * QEMU used to pick the magic value of 40 bits, which corresponds to
+ * consumer AMD devices but nothing else.
+ */
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
+ if (kvm_enabled()) {
+ uint32_t host_phys_bits = x86_host_phys_bits();
+ static bool warned;
+
+ if (cpu->host_phys_bits) {
+ /* The user asked for us to use the host physical bits */
+ cpu->phys_bits = host_phys_bits;
+ }
+
+ /* Print a warning if the user set it to a value that's not the
+ * host value.
+ */
+ if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
+ !warned) {
+ error_report("Warning: Host physical bits (%u)"
+ " does not match phys-bits property (%u)",
+ host_phys_bits, cpu->phys_bits);
+ warned = true;
+ }
+
+ if (cpu->phys_bits &&
+ (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
+ cpu->phys_bits < 32)) {
+ error_setg(errp, "phys-bits should be between 32 and %u "
+ "(but is %u)",
+ TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
+ return;
+ }
+ } else {
+ if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
+ error_setg(errp, "TCG only supports phys-bits=%u",
+ TCG_PHYS_ADDR_BITS);
+ return;
+ }
+ }
+ /* 0 means it was not explicitly set by the user (or by machine
+ * compat_props or by the host code above). In this case, the default
+ * is the value used by TCG (40).
+ */
+ if (cpu->phys_bits == 0) {
+ cpu->phys_bits = TCG_PHYS_ADDR_BITS;
+ }
+ } else {
+ /* For 32-bit systems, don't use the user-set value, but keep
+ * phys_bits consistent with what we tell the guest.
+ */
+ if (cpu->phys_bits != 0) {
+ error_setg(errp, "phys-bits is not user-configurable in 32 bit");
+ return;
+ }
+ if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
+ cpu->phys_bits = 36;
+ } else {
+ cpu->phys_bits = 32;
+ }
+ }
cpu_exec_init(cs, &error_abort);
if (tcg_enabled()) {
}
}
+static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
+{
+ X86CPU *cpu = X86_CPU(dev);
+
+#ifndef CONFIG_USER_ONLY
+ cpu_remove_sync(CPU(dev));
+ qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
+#endif
+
+ if (cpu->apic_state) {
+ object_unparent(OBJECT(cpu->apic_state));
+ cpu->apic_state = NULL;
+ }
+}
+
typedef struct BitProperty {
uint32_t *ptr;
uint32_t mask;
char **names;
FeatureWordInfo *fi = &feature_word_info[w];
- if (!fi->feat_names) {
- return;
- }
if (!fi->feat_names[bitnr]) {
return;
}
object_property_add(obj, "tsc-frequency", "int",
x86_cpuid_get_tsc_freq,
x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
- object_property_add(obj, "apic-id", "int",
- x86_cpuid_get_apic_id,
- x86_cpuid_set_apic_id, NULL, NULL, NULL);
object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
x86_cpu_get_feature_words,
NULL, NULL, (void *)env->features, NULL);
cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
-#ifndef CONFIG_USER_ONLY
- /* Any code creating new X86CPU objects have to set apic-id explicitly */
- cpu->apic_id = -1;
-#endif
-
for (w = 0; w < FEATURE_WORDS; w++) {
int bitnr;
}
static Property x86_cpu_properties[] = {
+#ifdef CONFIG_USER_ONLY
+ /* apic_id = 0 by default for *-user, see commit 9886e834 */
+ DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
+ DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
+ DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
+ DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
+#else
+ DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
+ DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
+ DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
+ DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
+#endif
DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
{ .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
- DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
- DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
- DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
+ DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
+ DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
+ DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
+ DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
+ DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
+ DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
+ DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
+ DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
+ DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
+ DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
+ DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
+ DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
DEFINE_PROP_END_OF_LIST()
};
xcc->parent_realize = dc->realize;
dc->realize = x86_cpu_realizefn;
+ dc->unrealize = x86_cpu_unrealizefn;
dc->props = x86_cpu_properties;
xcc->parent_reset = cc->reset;
cc->cpu_exec_enter = x86_cpu_exec_enter;
cc->cpu_exec_exit = x86_cpu_exec_exit;
+ dc->cannot_instantiate_with_device_add_yet = false;
/*
* Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
* object in cpus -> dangling pointer after final object_unref().