/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "sysemu/kvm.h"
#include "sysemu/cpus.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/qmp/qerror.h"
#include "qapi-types.h"
#include "qapi-visit.h"
#include "qapi/visitor.h"
#include "sysemu/arch_init.h"

#if defined(CONFIG_KVM)
#include <linux/kvm_para.h>
#endif

#include "sysemu/sysemu.h"
#include "hw/qdev-properties.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/xen/xen.h"
#include "hw/i386/apic_internal.h"
#endif
53 /* Cache topology CPUID constants: */
55 /* CPUID Leaf 2 Descriptors */
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
62 /* CPUID Leaf 4 constants: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
69 #define CPUID_4_LEVEL(l) ((l) << 5)
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
79 #define ASSOC_FULL 0xFF
/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : a ==   4 ? 0x4 : a ==  8 ? 0x6 : \
                          a ==  16 ? 0x8 : a ==  32 ? 0xA : a == 48 ? 0xB : \
                          a ==  64 ? 0xC : a ==  96 ? 0xD : a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
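
/* Illustrative only: when leaf 0x80000006 is assembled later in this file,
 * a 16-way L2 cache is reported as AMD_ENC_ASSOC(16) == 0x8 in the 4-bit
 * associativity field (ECX[15:12]); ASSOC_FULL maps to 0xF. */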
96 /* Definitions of the hardcoded cache entries we expose: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
140 /* TLB definitions: */
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
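
/* The TLB constants above feed the AMD-style leaves 0x80000005 (L1) and
 * 0x80000006 (L2) built later in this file.  Roughly, each register packs
 * four byte-sized fields, e.g. for leaf 0x80000005:
 *   EAX = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) |
 *         (L1_ITLB_2M_ASSOC << 8)  |  L1_ITLB_2M_ENTRIES;
 * (a sketch of the layout, not a verbatim copy of the code below). */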
static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;

    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}
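
/* Example: CPUID.0 on an Intel part returns EBX=0x756e6547 ("Genu"),
 * EDX=0x49656e69 ("ineI") and ECX=0x6c65746e ("ntel"), so
 * x86_cpu_vendor_words2str(dst, ebx, edx, ecx) produces "GenuineIntel". */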
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
200 /* Feature names that are already defined on feature_name[] but are set on
201 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
202 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203 * if and only if CPU vendor is AMD.
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
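
/* Note: "kvmclock" intentionally appears for both bit 0 and bit 3; they
 * correspond to KVM_FEATURE_CLOCKSOURCE and KVM_FEATURE_CLOCKSOURCE2 and
 * are controlled by a single property name. */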
248 static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
259 static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
262 "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
263 NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
266 static const char *cpuid_apm_edx_feature_name[] = {
267 NULL, NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 "invtsc", NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
277 static const char *cpuid_xsave_feature_name[] = {
278 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
279 NULL, NULL, NULL, NULL,
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
288 static const char *cpuid_6_feature_name[] = {
289 NULL, NULL, "arat", NULL,
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 NULL, NULL, NULL, NULL,
299 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
300 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
301 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
302 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
303 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
304 CPUID_PSE36 | CPUID_FXSR)
305 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
306 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
307 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
308 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
309 CPUID_PAE | CPUID_SEP | CPUID_APIC)
311 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
312 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
313 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
314 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
315 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
316 /* partly implemented:
317 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
319 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
320 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
321 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
322 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
323 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
325 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
326 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
327 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
328 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
329 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
333 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
335 #define TCG_EXT2_X86_64_FEATURES 0
338 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
339 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
340 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
341 TCG_EXT2_X86_64_FEATURES)
342 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
343 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
344 #define TCG_EXT4_FEATURES 0
345 #define TCG_SVM_FEATURES 0
346 #define TCG_KVM_FEATURES 0
347 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
348 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
350 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
351 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
352 CPUID_7_0_EBX_RDSEED */
353 #define TCG_APM_FEATURES 0
354 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
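
/* The TCG_*_FEATURES masks above cap what each feature word may report when
 * running under TCG; they are consumed by x86_cpu_get_supported_feature_word()
 * below via the .tcg_features field of feature_word_info[]. */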
357 typedef struct FeatureWordInfo {
358 const char **feat_names;
359 uint32_t cpuid_eax; /* Input EAX for CPUID */
360 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
361 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
362 int cpuid_reg; /* output register (R_* constant) */
363 uint32_t tcg_features; /* Feature flags supported by TCG */
364 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
367 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
369 .feat_names = feature_name,
370 .cpuid_eax = 1, .cpuid_reg = R_EDX,
371 .tcg_features = TCG_FEATURES,
374 .feat_names = ext_feature_name,
375 .cpuid_eax = 1, .cpuid_reg = R_ECX,
376 .tcg_features = TCG_EXT_FEATURES,
378 [FEAT_8000_0001_EDX] = {
379 .feat_names = ext2_feature_name,
380 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
381 .tcg_features = TCG_EXT2_FEATURES,
383 [FEAT_8000_0001_ECX] = {
384 .feat_names = ext3_feature_name,
385 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
386 .tcg_features = TCG_EXT3_FEATURES,
388 [FEAT_C000_0001_EDX] = {
389 .feat_names = ext4_feature_name,
390 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
391 .tcg_features = TCG_EXT4_FEATURES,
394 .feat_names = kvm_feature_name,
395 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
396 .tcg_features = TCG_KVM_FEATURES,
399 .feat_names = svm_feature_name,
400 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
401 .tcg_features = TCG_SVM_FEATURES,
404 .feat_names = cpuid_7_0_ebx_feature_name,
406 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
408 .tcg_features = TCG_7_0_EBX_FEATURES,
410 [FEAT_8000_0007_EDX] = {
411 .feat_names = cpuid_apm_edx_feature_name,
412 .cpuid_eax = 0x80000007,
414 .tcg_features = TCG_APM_FEATURES,
415 .unmigratable_flags = CPUID_APM_INVTSC,
418 .feat_names = cpuid_xsave_feature_name,
420 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
425 .feat_names = cpuid_6_feature_name,
426 .cpuid_eax = 6, .cpuid_reg = R_EAX,
427 .tcg_features = TCG_6_EAX_FEATURES,
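
/* Each FeatureWord entry above ties a named bit array to one CPUID
 * (leaf, ecx, register) tuple, so generic helpers such as
 * x86_cpu_get_supported_feature_word() and report_unavailable_features()
 * can loop over FEATURE_WORDS instead of special-casing each leaf. */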
431 typedef struct X86RegisterInfo32 {
432 /* Name of register */
434 /* QAPI enum value register */
435 X86CPURegister32 qapi_enum;
438 #define REGISTER(reg) \
439 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
440 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
452 typedef struct ExtSaveArea {
453 uint32_t feature, bits;
454 uint32_t offset, size;
457 static const ExtSaveArea ext_save_areas[] = {
458 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
459 .offset = 0x240, .size = 0x100 },
460 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
461 .offset = 0x3c0, .size = 0x40 },
462 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
463 .offset = 0x400, .size = 0x40 },
464 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
465 .offset = 0x440, .size = 0x40 },
466 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
467 .offset = 0x480, .size = 0x200 },
468 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
469 .offset = 0x680, .size = 0x400 },
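
/* The array index is the XSAVE state component number: 2 = AVX (YMM_Hi128),
 * 3/4 = MPX BNDREGS/BNDCSR, 5-7 = AVX-512 opmask/ZMM_Hi256/Hi16_ZMM.
 * offset/size describe where each component lives in the XSAVE area. */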
472 const char *get_register_name_32(unsigned int reg)
474 if (reg >= CPU_NB_REGS32) {
477 return x86_reg_info_32[reg].name;
481 * Returns the set of feature flags that are supported and migratable by
482 * QEMU, for a given FeatureWord.
484 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
486 FeatureWordInfo *wi = &feature_word_info[w];
490 for (i = 0; i < 32; i++) {
491 uint32_t f = 1U << i;
492 /* If the feature name is unknown, it is not supported by QEMU yet */
493 if (!wi->feat_names[i]) {
496 /* Skip features known to QEMU, but explicitly marked as unmigratable */
497 if (wi->unmigratable_flags & f) {
505 void host_cpuid(uint32_t function, uint32_t count,
506 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
512 : "=a"(vec[0]), "=b"(vec[1]),
513 "=c"(vec[2]), "=d"(vec[3])
514 : "0"(function), "c"(count) : "cc");
515 #elif defined(__i386__)
516 asm volatile("pusha \n\t"
518 "mov %%eax, 0(%2) \n\t"
519 "mov %%ebx, 4(%2) \n\t"
520 "mov %%ecx, 8(%2) \n\t"
521 "mov %%edx, 12(%2) \n\t"
523 : : "a"(function), "c"(count), "S"(vec)
539 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
541 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
542 * a substring. ex if !NULL points to the first char after a substring,
543 * otherwise the string is assumed to sized by a terminating nul.
544 * Return lexical ordering of *s1:*s2.
546 static int sstrcmp(const char *s1, const char *e1,
547 const char *s2, const char *e2)
550 if (!*s1 || !*s2 || *s1 != *s2)
553 if (s1 == e1 && s2 == e2)
562 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
563 * '|' delimited (possibly empty) strings in which case search for a match
564 * within the alternatives proceeds left to right. Return 0 for success,
565 * non-zero otherwise.
567 static int altcmp(const char *s, const char *e, const char *altstr)
571 for (q = p = altstr; ; ) {
572 while (*p && *p != '|')
574 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
583 /* search featureset for flag *[s..e), if found set corresponding bit in
584 * *pval and return true, otherwise return false
586 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
587 const char **featureset)
593 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
594 if (*ppc && !altcmp(s, e, *ppc)) {
602 static void add_flagname_to_bitmaps(const char *flagname,
603 FeatureWordArray words,
607 for (w = 0; w < FEATURE_WORDS; w++) {
608 FeatureWordInfo *wi = &feature_word_info[w];
609 if (wi->feat_names &&
610 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
614 if (w == FEATURE_WORDS) {
615 error_setg(errp, "CPU feature %s not found", flagname);
619 /* CPU class name definitions: */
621 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
622 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
624 /* Return type name for a given CPU model name
625 * Caller is responsible for freeing the returned string.
627 static char *x86_cpu_type_name(const char *model_name)
629 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
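
/* Example (illustrative): x86_cpu_type_name("qemu64") returns
 * "qemu64-x86_64-cpu" on 64-bit targets, since TYPE_X86_CPU supplies the
 * "-x86_64-cpu" (or "-i386-cpu") suffix. */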
632 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
637 if (cpu_model == NULL) {
641 typename = x86_cpu_type_name(cpu_model);
642 oc = object_class_by_name(typename);
647 struct X86CPUDefinition {
652 /* vendor is zero-terminated, 12 character ASCII string */
653 char vendor[CPUID_VENDOR_SZ + 1];
657 FeatureWordArray features;
661 static X86CPUDefinition builtin_x86_defs[] = {
665 .vendor = CPUID_VENDOR_AMD,
669 .features[FEAT_1_EDX] =
671 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
673 .features[FEAT_1_ECX] =
674 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
675 .features[FEAT_8000_0001_EDX] =
676 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
677 .features[FEAT_8000_0001_ECX] =
678 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
679 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
680 .xlevel = 0x8000000A,
685 .vendor = CPUID_VENDOR_AMD,
689 /* Missing: CPUID_HT */
690 .features[FEAT_1_EDX] =
692 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
693 CPUID_PSE36 | CPUID_VME,
694 .features[FEAT_1_ECX] =
695 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
697 .features[FEAT_8000_0001_EDX] =
698 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
699 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
700 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
701 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
703 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
704 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
705 .features[FEAT_8000_0001_ECX] =
706 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
707 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
708 /* Missing: CPUID_SVM_LBRV */
709 .features[FEAT_SVM] =
711 .xlevel = 0x8000001A,
712 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
717 .vendor = CPUID_VENDOR_INTEL,
721 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
722 .features[FEAT_1_EDX] =
724 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
725 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
726 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
727 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
728 .features[FEAT_1_ECX] =
729 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
731 .features[FEAT_8000_0001_EDX] =
732 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
733 .features[FEAT_8000_0001_ECX] =
735 .xlevel = 0x80000008,
736 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
741 .vendor = CPUID_VENDOR_INTEL,
745 /* Missing: CPUID_HT */
746 .features[FEAT_1_EDX] =
747 PPRO_FEATURES | CPUID_VME |
748 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
750 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
751 .features[FEAT_1_ECX] =
752 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
753 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
754 .features[FEAT_8000_0001_EDX] =
755 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
756 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
757 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
758 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
759 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
760 .features[FEAT_8000_0001_ECX] =
762 .xlevel = 0x80000008,
763 .model_id = "Common KVM processor"
768 .vendor = CPUID_VENDOR_INTEL,
772 .features[FEAT_1_EDX] =
774 .features[FEAT_1_ECX] =
775 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
776 .xlevel = 0x80000004,
781 .vendor = CPUID_VENDOR_INTEL,
785 .features[FEAT_1_EDX] =
786 PPRO_FEATURES | CPUID_VME |
787 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
788 .features[FEAT_1_ECX] =
790 .features[FEAT_8000_0001_ECX] =
792 .xlevel = 0x80000008,
793 .model_id = "Common 32-bit KVM processor"
798 .vendor = CPUID_VENDOR_INTEL,
802 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
803 .features[FEAT_1_EDX] =
804 PPRO_FEATURES | CPUID_VME |
805 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
807 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
808 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
809 .features[FEAT_1_ECX] =
810 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
811 .features[FEAT_8000_0001_EDX] =
813 .xlevel = 0x80000008,
814 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
819 .vendor = CPUID_VENDOR_INTEL,
823 .features[FEAT_1_EDX] =
830 .vendor = CPUID_VENDOR_INTEL,
834 .features[FEAT_1_EDX] =
841 .vendor = CPUID_VENDOR_INTEL,
845 .features[FEAT_1_EDX] =
852 .vendor = CPUID_VENDOR_INTEL,
856 .features[FEAT_1_EDX] =
863 .vendor = CPUID_VENDOR_AMD,
867 .features[FEAT_1_EDX] =
868 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
870 .features[FEAT_8000_0001_EDX] =
871 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
872 .xlevel = 0x80000008,
877 .vendor = CPUID_VENDOR_INTEL,
881 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
882 .features[FEAT_1_EDX] =
884 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
885 CPUID_ACPI | CPUID_SS,
886 /* Some CPUs got no CPUID_SEP */
887 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
889 .features[FEAT_1_ECX] =
890 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
892 .features[FEAT_8000_0001_EDX] =
894 .features[FEAT_8000_0001_ECX] =
896 .xlevel = 0x80000008,
897 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
902 .vendor = CPUID_VENDOR_INTEL,
906 .features[FEAT_1_EDX] =
907 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
908 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
909 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
910 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
911 CPUID_DE | CPUID_FP87,
912 .features[FEAT_1_ECX] =
913 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
914 .features[FEAT_8000_0001_EDX] =
915 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
916 .features[FEAT_8000_0001_ECX] =
918 .xlevel = 0x80000008,
919 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
924 .vendor = CPUID_VENDOR_INTEL,
928 .features[FEAT_1_EDX] =
929 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
930 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
931 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
932 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
933 CPUID_DE | CPUID_FP87,
934 .features[FEAT_1_ECX] =
935 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
937 .features[FEAT_8000_0001_EDX] =
938 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
939 .features[FEAT_8000_0001_ECX] =
941 .xlevel = 0x80000008,
942 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
947 .vendor = CPUID_VENDOR_INTEL,
951 .features[FEAT_1_EDX] =
952 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
953 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
954 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
955 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
956 CPUID_DE | CPUID_FP87,
957 .features[FEAT_1_ECX] =
958 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
959 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
960 .features[FEAT_8000_0001_EDX] =
961 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
962 .features[FEAT_8000_0001_ECX] =
964 .xlevel = 0x80000008,
965 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
970 .vendor = CPUID_VENDOR_INTEL,
974 .features[FEAT_1_EDX] =
975 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
976 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
977 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
978 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
979 CPUID_DE | CPUID_FP87,
980 .features[FEAT_1_ECX] =
981 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
982 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
983 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
984 .features[FEAT_8000_0001_EDX] =
985 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
986 .features[FEAT_8000_0001_ECX] =
988 .features[FEAT_6_EAX] =
990 .xlevel = 0x80000008,
991 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
994 .name = "SandyBridge",
996 .vendor = CPUID_VENDOR_INTEL,
1000 .features[FEAT_1_EDX] =
1001 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1002 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1003 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1004 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1005 CPUID_DE | CPUID_FP87,
1006 .features[FEAT_1_ECX] =
1007 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1008 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1009 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1010 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1012 .features[FEAT_8000_0001_EDX] =
1013 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1015 .features[FEAT_8000_0001_ECX] =
1017 .features[FEAT_XSAVE] =
1018 CPUID_XSAVE_XSAVEOPT,
1019 .features[FEAT_6_EAX] =
1021 .xlevel = 0x80000008,
1022 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1025 .name = "IvyBridge",
1027 .vendor = CPUID_VENDOR_INTEL,
1031 .features[FEAT_1_EDX] =
1032 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1033 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1034 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1035 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1036 CPUID_DE | CPUID_FP87,
1037 .features[FEAT_1_ECX] =
1038 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1039 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1040 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1041 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1042 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1043 .features[FEAT_7_0_EBX] =
1044 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1046 .features[FEAT_8000_0001_EDX] =
1047 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1049 .features[FEAT_8000_0001_ECX] =
1051 .features[FEAT_XSAVE] =
1052 CPUID_XSAVE_XSAVEOPT,
1053 .features[FEAT_6_EAX] =
1055 .xlevel = 0x80000008,
1056 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1059 .name = "Haswell-noTSX",
1061 .vendor = CPUID_VENDOR_INTEL,
1065 .features[FEAT_1_EDX] =
1066 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1067 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1068 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1069 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1070 CPUID_DE | CPUID_FP87,
1071 .features[FEAT_1_ECX] =
1072 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1073 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1074 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1075 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1076 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1077 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1078 .features[FEAT_8000_0001_EDX] =
1079 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1081 .features[FEAT_8000_0001_ECX] =
1082 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1083 .features[FEAT_7_0_EBX] =
1084 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1085 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1086 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1087 .features[FEAT_XSAVE] =
1088 CPUID_XSAVE_XSAVEOPT,
1089 .features[FEAT_6_EAX] =
1091 .xlevel = 0x80000008,
1092 .model_id = "Intel Core Processor (Haswell, no TSX)",
1096 .vendor = CPUID_VENDOR_INTEL,
1100 .features[FEAT_1_EDX] =
1101 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1102 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1103 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1104 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1105 CPUID_DE | CPUID_FP87,
1106 .features[FEAT_1_ECX] =
1107 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1108 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1109 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1110 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1111 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1112 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1113 .features[FEAT_8000_0001_EDX] =
1114 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1116 .features[FEAT_8000_0001_ECX] =
1117 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1118 .features[FEAT_7_0_EBX] =
1119 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1120 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1121 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1123 .features[FEAT_XSAVE] =
1124 CPUID_XSAVE_XSAVEOPT,
1125 .features[FEAT_6_EAX] =
1127 .xlevel = 0x80000008,
1128 .model_id = "Intel Core Processor (Haswell)",
1131 .name = "Broadwell-noTSX",
1133 .vendor = CPUID_VENDOR_INTEL,
1137 .features[FEAT_1_EDX] =
1138 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1139 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1140 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1141 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1142 CPUID_DE | CPUID_FP87,
1143 .features[FEAT_1_ECX] =
1144 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1145 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1146 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1147 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1148 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1149 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1150 .features[FEAT_8000_0001_EDX] =
1151 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1153 .features[FEAT_8000_0001_ECX] =
1154 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1155 .features[FEAT_7_0_EBX] =
1156 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1157 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1158 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1159 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1161 .features[FEAT_XSAVE] =
1162 CPUID_XSAVE_XSAVEOPT,
1163 .features[FEAT_6_EAX] =
1165 .xlevel = 0x80000008,
1166 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1169 .name = "Broadwell",
1171 .vendor = CPUID_VENDOR_INTEL,
1175 .features[FEAT_1_EDX] =
1176 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1177 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1178 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1179 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1180 CPUID_DE | CPUID_FP87,
1181 .features[FEAT_1_ECX] =
1182 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1183 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1184 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1185 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1186 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1187 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1188 .features[FEAT_8000_0001_EDX] =
1189 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1191 .features[FEAT_8000_0001_ECX] =
1192 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1193 .features[FEAT_7_0_EBX] =
1194 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1195 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1196 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1197 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1199 .features[FEAT_XSAVE] =
1200 CPUID_XSAVE_XSAVEOPT,
1201 .features[FEAT_6_EAX] =
1203 .xlevel = 0x80000008,
1204 .model_id = "Intel Core Processor (Broadwell)",
1207 .name = "Opteron_G1",
1209 .vendor = CPUID_VENDOR_AMD,
1213 .features[FEAT_1_EDX] =
1214 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1215 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1216 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1217 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1218 CPUID_DE | CPUID_FP87,
1219 .features[FEAT_1_ECX] =
1221 .features[FEAT_8000_0001_EDX] =
1222 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1223 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1224 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1225 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1226 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1227 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1228 .xlevel = 0x80000008,
1229 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1232 .name = "Opteron_G2",
1234 .vendor = CPUID_VENDOR_AMD,
1238 .features[FEAT_1_EDX] =
1239 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1240 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1241 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1242 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1243 CPUID_DE | CPUID_FP87,
1244 .features[FEAT_1_ECX] =
1245 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1246 .features[FEAT_8000_0001_EDX] =
1247 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1248 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1249 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1250 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1251 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1252 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1253 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1254 .features[FEAT_8000_0001_ECX] =
1255 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1256 .xlevel = 0x80000008,
1257 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1260 .name = "Opteron_G3",
1262 .vendor = CPUID_VENDOR_AMD,
1266 .features[FEAT_1_EDX] =
1267 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1268 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1269 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1270 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1271 CPUID_DE | CPUID_FP87,
1272 .features[FEAT_1_ECX] =
1273 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1275 .features[FEAT_8000_0001_EDX] =
1276 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1277 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1278 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1279 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1280 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1281 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1282 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1283 .features[FEAT_8000_0001_ECX] =
1284 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1285 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1286 .xlevel = 0x80000008,
1287 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1290 .name = "Opteron_G4",
1292 .vendor = CPUID_VENDOR_AMD,
1296 .features[FEAT_1_EDX] =
1297 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1298 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1299 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1300 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1301 CPUID_DE | CPUID_FP87,
1302 .features[FEAT_1_ECX] =
1303 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1304 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1305 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1307 .features[FEAT_8000_0001_EDX] =
1308 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1309 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1310 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1311 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1312 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1313 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1314 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1315 .features[FEAT_8000_0001_ECX] =
1316 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1317 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1318 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1321 .xlevel = 0x8000001A,
1322 .model_id = "AMD Opteron 62xx class CPU",
1325 .name = "Opteron_G5",
1327 .vendor = CPUID_VENDOR_AMD,
1331 .features[FEAT_1_EDX] =
1332 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1333 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1334 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1335 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1336 CPUID_DE | CPUID_FP87,
1337 .features[FEAT_1_ECX] =
1338 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1339 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1340 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1341 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1342 .features[FEAT_8000_0001_EDX] =
1343 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1344 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1345 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1346 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1347 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1348 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1349 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1350 .features[FEAT_8000_0001_ECX] =
1351 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1352 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1353 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1356 .xlevel = 0x8000001A,
1357 .model_id = "AMD Opteron 63xx class CPU",
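
/* Every builtin_x86_defs[] entry above is registered as its own QOM class
 * by x86_register_cpudef_type() near the end of this file, which is how
 * "-cpu Opteron_G5" resolves through x86_cpu_class_by_name(). */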
1361 typedef struct PropValue {
1362 const char *prop, *value;
1365 /* KVM-specific features that are automatically added/removed
1366 * from all CPU models when KVM is enabled.
1368 static PropValue kvm_default_props[] = {
1369 { "kvmclock", "on" },
1370 { "kvm-nopiodelay", "on" },
1371 { "kvm-asyncpf", "on" },
1372 { "kvm-steal-time", "on" },
1373 { "kvm-pv-eoi", "on" },
1374 { "kvmclock-stable-bit", "on" },
1377 { "monitor", "off" },
1382 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1385 for (pv = kvm_default_props; pv->prop; pv++) {
1386 if (!strcmp(pv->prop, prop)) {
1392 /* It is valid to call this function only for properties that
1393 * are already present in the kvm_default_props table.
1398 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1399 bool migratable_only);
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
1418 static X86CPUDefinition host_cpudef;
1420 static Property host_x86_cpu_properties[] = {
1421 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1422 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1423 DEFINE_PROP_END_OF_LIST()
1426 /* class_init for the "host" CPU model
1428 * This function may be called before KVM is initialized.
1430 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1432 DeviceClass *dc = DEVICE_CLASS(oc);
1433 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1434 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1436 xcc->kvm_required = true;
1438 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1439 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1441 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1442 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1443 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1444 host_cpudef.stepping = eax & 0x0F;
1446 cpu_x86_fill_model_id(host_cpudef.model_id);
1448 xcc->cpu_def = &host_cpudef;
1450 /* level, xlevel, xlevel2, and the feature words are initialized on
1451 * instance_init, because they require KVM to be initialized.
1454 dc->props = host_x86_cpu_properties;
1455 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1456 dc->cannot_destroy_with_object_finalize_yet = true;
1459 static void host_x86_cpu_initfn(Object *obj)
1461 X86CPU *cpu = X86_CPU(obj);
1462 CPUX86State *env = &cpu->env;
1463 KVMState *s = kvm_state;
1465 assert(kvm_enabled());
1467 /* We can't fill the features array here because we don't know yet if
1468 * "migratable" is true or false.
1470 cpu->host_features = true;
1472 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1473 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1474 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1476 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1479 static const TypeInfo host_x86_cpu_type_info = {
1480 .name = X86_CPU_TYPE_NAME("host"),
1481 .parent = TYPE_X86_CPU,
1482 .instance_init = host_x86_cpu_initfn,
1483 .class_init = host_x86_cpu_class_init,
1488 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1490 FeatureWordInfo *f = &feature_word_info[w];
1493 for (i = 0; i < 32; ++i) {
1494 if ((1UL << i) & mask) {
1495 const char *reg = get_register_name_32(f->cpuid_reg);
1497 fprintf(stderr, "warning: %s doesn't support requested feature: "
1498 "CPUID.%02XH:%s%s%s [bit %d]\n",
1499 kvm_enabled() ? "host" : "TCG",
1501 f->feat_names[i] ? "." : "",
1502 f->feat_names[i] ? f->feat_names[i] : "", i);
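
/* The family/model/stepping accessors below operate on the CPUID leaf 1 EAX
 * layout kept in env->cpuid_version: stepping in bits 3:0, model in bits 7:4
 * combined with the extended model in bits 19:16, and family in bits 11:8
 * with the extended family in bits 27:20 added when the base family is 0xF,
 * per the architectural definition. */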
1507 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1508 const char *name, Error **errp)
1510 X86CPU *cpu = X86_CPU(obj);
1511 CPUX86State *env = &cpu->env;
1514 value = (env->cpuid_version >> 8) & 0xf;
1516 value += (env->cpuid_version >> 20) & 0xff;
1518 visit_type_int(v, &value, name, errp);
1521 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1522 const char *name, Error **errp)
1524 X86CPU *cpu = X86_CPU(obj);
1525 CPUX86State *env = &cpu->env;
1526 const int64_t min = 0;
1527 const int64_t max = 0xff + 0xf;
1528 Error *local_err = NULL;
1531 visit_type_int(v, &value, name, &local_err);
1533 error_propagate(errp, local_err);
1536 if (value < min || value > max) {
1537 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1538 name ? name : "null", value, min, max);
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
1550 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1551 const char *name, Error **errp)
1553 X86CPU *cpu = X86_CPU(obj);
1554 CPUX86State *env = &cpu->env;
1557 value = (env->cpuid_version >> 4) & 0xf;
1558 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1559 visit_type_int(v, &value, name, errp);
1562 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1563 const char *name, Error **errp)
1565 X86CPU *cpu = X86_CPU(obj);
1566 CPUX86State *env = &cpu->env;
1567 const int64_t min = 0;
1568 const int64_t max = 0xff;
1569 Error *local_err = NULL;
1572 visit_type_int(v, &value, name, &local_err);
1574 error_propagate(errp, local_err);
1577 if (value < min || value > max) {
1578 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1579 name ? name : "null", value, min, max);
1583 env->cpuid_version &= ~0xf00f0;
1584 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1587 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1588 void *opaque, const char *name,
1591 X86CPU *cpu = X86_CPU(obj);
1592 CPUX86State *env = &cpu->env;
1595 value = env->cpuid_version & 0xf;
1596 visit_type_int(v, &value, name, errp);
1599 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1600 void *opaque, const char *name,
1603 X86CPU *cpu = X86_CPU(obj);
1604 CPUX86State *env = &cpu->env;
1605 const int64_t min = 0;
1606 const int64_t max = 0xf;
1607 Error *local_err = NULL;
1610 visit_type_int(v, &value, name, &local_err);
1612 error_propagate(errp, local_err);
1615 if (value < min || value > max) {
1616 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1617 name ? name : "null", value, min, max);
1621 env->cpuid_version &= ~0xf;
1622 env->cpuid_version |= value & 0xf;
1625 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1627 X86CPU *cpu = X86_CPU(obj);
1628 CPUX86State *env = &cpu->env;
1631 value = g_malloc(CPUID_VENDOR_SZ + 1);
1632 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1633 env->cpuid_vendor3);
1637 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1640 X86CPU *cpu = X86_CPU(obj);
1641 CPUX86State *env = &cpu->env;
1644 if (strlen(value) != CPUID_VENDOR_SZ) {
1645 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1649 env->cpuid_vendor1 = 0;
1650 env->cpuid_vendor2 = 0;
1651 env->cpuid_vendor3 = 0;
1652 for (i = 0; i < 4; i++) {
1653 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1654 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1655 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1659 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1661 X86CPU *cpu = X86_CPU(obj);
1662 CPUX86State *env = &cpu->env;
1666 value = g_malloc(48 + 1);
1667 for (i = 0; i < 48; i++) {
1668 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1674 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1677 X86CPU *cpu = X86_CPU(obj);
1678 CPUX86State *env = &cpu->env;
1681 if (model_id == NULL) {
1684 len = strlen(model_id);
1685 memset(env->cpuid_model, 0, 48);
1686 for (i = 0; i < 48; i++) {
1690 c = (uint8_t)model_id[i];
1692 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1696 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1697 const char *name, Error **errp)
1699 X86CPU *cpu = X86_CPU(obj);
1702 value = cpu->env.tsc_khz * 1000;
1703 visit_type_int(v, &value, name, errp);
1706 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1707 const char *name, Error **errp)
1709 X86CPU *cpu = X86_CPU(obj);
1710 const int64_t min = 0;
1711 const int64_t max = INT64_MAX;
1712 Error *local_err = NULL;
1715 visit_type_int(v, &value, name, &local_err);
1717 error_propagate(errp, local_err);
1720 if (value < min || value > max) {
1721 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1722 name ? name : "null", value, min, max);
1726 cpu->env.tsc_khz = value / 1000;
1729 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1730 const char *name, Error **errp)
1732 X86CPU *cpu = X86_CPU(obj);
1733 int64_t value = cpu->apic_id;
1735 visit_type_int(v, &value, name, errp);
1738 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1739 const char *name, Error **errp)
1741 X86CPU *cpu = X86_CPU(obj);
1742 DeviceState *dev = DEVICE(obj);
1743 const int64_t min = 0;
1744 const int64_t max = UINT32_MAX;
1745 Error *error = NULL;
1748 if (dev->realized) {
1749 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1750 "it was realized", name, object_get_typename(obj));
1754 visit_type_int(v, &value, name, &error);
1756 error_propagate(errp, error);
1759 if (value < min || value > max) {
1760 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1761 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1762 object_get_typename(obj), name, value, min, max);
1766 if ((value != cpu->apic_id) && cpu_exists(value)) {
1767 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1770 cpu->apic_id = value;
1773 /* Generic getter for "feature-words" and "filtered-features" properties */
1774 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1775 const char *name, Error **errp)
1777 uint32_t *array = (uint32_t *)opaque;
1780 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1781 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1782 X86CPUFeatureWordInfoList *list = NULL;
1784 for (w = 0; w < FEATURE_WORDS; w++) {
1785 FeatureWordInfo *wi = &feature_word_info[w];
1786 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1787 qwi->cpuid_input_eax = wi->cpuid_eax;
1788 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1789 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1790 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1791 qwi->features = array[w];
1793 /* List will be in reverse order, but order shouldn't matter */
1794 list_entries[w].next = list;
1795 list_entries[w].value = &word_infos[w];
1796 list = &list_entries[w];
1799 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1800 error_propagate(errp, err);
1803 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1804 const char *name, Error **errp)
1806 X86CPU *cpu = X86_CPU(obj);
1807 int64_t value = cpu->hyperv_spinlock_attempts;
1809 visit_type_int(v, &value, name, errp);
1812 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1813 const char *name, Error **errp)
1815 const int64_t min = 0xFFF;
1816 const int64_t max = UINT_MAX;
1817 X86CPU *cpu = X86_CPU(obj);
1821 visit_type_int(v, &value, name, &err);
1823 error_propagate(errp, err);
1827 if (value < min || value > max) {
1828 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1829 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1830 object_get_typename(obj), name ? name : "null",
1834 cpu->hyperv_spinlock_attempts = value;
1837 static PropertyInfo qdev_prop_spinlocks = {
1839 .get = x86_get_hv_spinlocks,
1840 .set = x86_set_hv_spinlocks,
1843 /* Convert all '_' in a feature string option name to '-', to make feature
1844 * name conform to QOM property naming rule, which uses '-' instead of '_'.
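 * e.g. "kvm_nopiodelay" becomes "kvm-nopiodelay", matching the property name.
 */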
1846 static inline void feat2prop(char *s)
1848 while ((s = strchr(s, '_'))) {
1853 /* Parse "+feature,-feature,feature=foo" CPU feature string
1855 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1858 X86CPU *cpu = X86_CPU(cs);
    char *featurestr; /* Single "key=value" string being parsed */
1861 /* Features to be added */
1862 FeatureWordArray plus_features = { 0 };
1863 /* Features to be removed */
1864 FeatureWordArray minus_features = { 0 };
1866 CPUX86State *env = &cpu->env;
1867 Error *local_err = NULL;
1869 featurestr = features ? strtok(features, ",") : NULL;
1871 while (featurestr) {
1873 if (featurestr[0] == '+') {
1874 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1875 } else if (featurestr[0] == '-') {
1876 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1877 } else if ((val = strchr(featurestr, '='))) {
1879 feat2prop(featurestr);
1880 if (!strcmp(featurestr, "xlevel")) {
1884 numvalue = strtoul(val, &err, 0);
1885 if (!*val || *err) {
1886 error_setg(errp, "bad numerical value %s", val);
1889 if (numvalue < 0x80000000) {
1890 error_report("xlevel value shall always be >= 0x80000000"
1891 ", fixup will be removed in future versions");
1892 numvalue += 0x80000000;
1894 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1895 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1896 } else if (!strcmp(featurestr, "tsc-freq")) {
1901 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1902 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1903 if (tsc_freq < 0 || *err) {
1904 error_setg(errp, "bad numerical value %s", val);
1907 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1908 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1910 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1912 const int min = 0xFFF;
1914 numvalue = strtoul(val, &err, 0);
1915 if (!*val || *err) {
1916 error_setg(errp, "bad numerical value %s", val);
1919 if (numvalue < min) {
1920 error_report("hv-spinlocks value shall always be >= 0x%x"
1921 ", fixup will be removed in future versions",
1925 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1926 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1928 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1931 feat2prop(featurestr);
1932 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1935 error_propagate(errp, local_err);
1938 featurestr = strtok(NULL, ",");
1941 if (cpu->host_features) {
1942 for (w = 0; w < FEATURE_WORDS; w++) {
1944 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1948 for (w = 0; w < FEATURE_WORDS; w++) {
1949 env->features[w] |= plus_features[w];
1950 env->features[w] &= ~minus_features[w];
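
/* Example (illustrative): with "-cpu Nehalem,+aes,-popcnt,xlevel=0x8000000A"
 * this function receives "+aes,-popcnt,xlevel=0x8000000A": "+aes" lands in
 * plus_features, "-popcnt" in minus_features, and "xlevel=..." is forwarded
 * to the corresponding QOM property. */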
1954 /* Print all cpuid feature names in featureset
1956 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1961 for (bit = 0; bit < 32; bit++) {
1962 if (featureset[bit]) {
1963 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1969 /* generate CPU information. */
1970 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1972 X86CPUDefinition *def;
1976 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1977 def = &builtin_x86_defs[i];
1978 snprintf(buf, sizeof(buf), "%s", def->name);
1979 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1982 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1983 "KVM processor with all supported host features "
1984 "(only available in KVM mode)");
1987 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1988 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1989 FeatureWordInfo *fw = &feature_word_info[i];
1991 (*cpu_fprintf)(f, " ");
1992 listflags(f, cpu_fprintf, fw->feat_names);
1993 (*cpu_fprintf)(f, "\n");
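
/* This listing backs "-cpu help": one line per built-in model, the special
 * "host" model, and the set of flag names recognized per feature word. */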
1997 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1999 CpuDefinitionInfoList *cpu_list = NULL;
2000 X86CPUDefinition *def;
2003 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2004 CpuDefinitionInfoList *entry;
2005 CpuDefinitionInfo *info;
2007 def = &builtin_x86_defs[i];
2008 info = g_malloc0(sizeof(*info));
2009 info->name = g_strdup(def->name);
2011 entry = g_malloc0(sizeof(*entry));
2012 entry->value = info;
2013 entry->next = cpu_list;
2020 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2021 bool migratable_only)
2023 FeatureWordInfo *wi = &feature_word_info[w];
2026 if (kvm_enabled()) {
2027 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2030 } else if (tcg_enabled()) {
2031 r = wi->tcg_features;
2035 if (migratable_only) {
2036 r &= x86_cpu_get_migratable_flags(w);
2042 * Filters CPU feature words based on host availability of each feature.
2044 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2046 static int x86_cpu_filter_features(X86CPU *cpu)
2048 CPUX86State *env = &cpu->env;
2052 for (w = 0; w < FEATURE_WORDS; w++) {
2053 uint32_t host_feat =
2054 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2055 uint32_t requested_features = env->features[w];
2056 env->features[w] &= host_feat;
2057 cpu->filtered_features[w] = requested_features & ~env->features[w];
2058 if (cpu->filtered_features[w]) {
2059 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2060 report_unavailable_features(w, cpu->filtered_features[w]);
2069 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2072 for (pv = props; pv->prop; pv++) {
2076 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2081 /* Load data from X86CPUDefinition
2083 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2085 CPUX86State *env = &cpu->env;
2087 char host_vendor[CPUID_VENDOR_SZ + 1];
2090 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2091 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2092 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2093 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2094 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2095 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2096 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2097 for (w = 0; w < FEATURE_WORDS; w++) {
2098 env->features[w] = def->features[w];
2101 /* Special cases not set in the X86CPUDefinition structs: */
2102 if (kvm_enabled()) {
2103 x86_cpu_apply_props(cpu, kvm_default_props);
2106 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2108 /* sysenter isn't supported in compatibility mode on AMD,
2109 * syscall isn't supported in compatibility mode on Intel.
2110 * Normally we advertise the actual CPU vendor, but you can
2111 * override this using the 'vendor' property if you want to use
2112 * KVM's sysenter/syscall emulation in compatibility mode and
2113 * when doing cross vendor migration. */
2115 vendor = def->vendor;
2116 if (kvm_enabled()) {
2117 uint32_t ebx = 0, ecx = 0, edx = 0;
2118 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2119 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2120 vendor = host_vendor;
2123 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
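/* Create (but do not realize) an X86CPU from a "name[,+feat,...]" model
 * string: look up the CPU class, reject KVM-only models when KVM is not
 * in use, then parse the remaining feature string against the new object.
 */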
2127 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2132 gchar **model_pieces;
2133 char *name, *features;
2134 Error *error = NULL;
2136 model_pieces = g_strsplit(cpu_model, ",", 2);
2137 if (!model_pieces[0]) {
2138 error_setg(&error, "Invalid/empty CPU model name");
2141 name = model_pieces[0];
2142 features = model_pieces[1];
2144 oc = x86_cpu_class_by_name(name);
2146 error_setg(&error, "Unable to find CPU definition: %s", name);
2149 xcc = X86_CPU_CLASS(oc);
2151 if (xcc->kvm_required && !kvm_enabled()) {
2152 error_setg(&error, "CPU model '%s' requires KVM", name);
2156 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2158 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2164 if (error != NULL) {
2165 error_propagate(errp, error);
2167 object_unref(OBJECT(cpu));
2171 g_strfreev(model_pieces);
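/* Convenience wrapper for board code: create and realize a CPU from a
 * model string, reporting (rather than propagating) any error.
 */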
2175 X86CPU *cpu_x86_init(const char *cpu_model)
2177 Error *error = NULL;
2180 cpu = cpu_x86_create(cpu_model, &error);
2185 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2189 error_report_err(error);
2191 object_unref(OBJECT(cpu));
2198 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2200 X86CPUDefinition *cpudef = data;
2201 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2203 xcc->cpu_def = cpudef;
2206 static void x86_register_cpudef_type(X86CPUDefinition *def)
2208 char *typename = x86_cpu_type_name(def->name);
2211 .parent = TYPE_X86_CPU,
2212 .class_init = x86_cpu_cpudef_class_init,
2220 #if !defined(CONFIG_USER_ONLY)
2222 void cpu_clear_apic_feature(CPUX86State *env)
2224 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2227 #endif /* !CONFIG_USER_ONLY */
2229 /* Initialize list of CPU models, filling some non-static fields if necessary */
2231 void x86_cpudef_setup(void)
2234 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2236 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2237 X86CPUDefinition *def = &builtin_x86_defs[i];
2239 /* Look for specific "cpudef" models that
2240 * have the QEMU version in .model_id */
2241 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2242 if (strcmp(model_with_versions[j], def->name) == 0) {
2243 pstrcpy(def->model_id, sizeof(def->model_id),
2244 "QEMU Virtual CPU version ");
2245 pstrcat(def->model_id, sizeof(def->model_id),
2253 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2254 uint32_t *eax, uint32_t *ebx,
2255 uint32_t *ecx, uint32_t *edx)
2257 X86CPU *cpu = x86_env_get_cpu(env);
2258 CPUState *cs = CPU(cpu);
2260 /* test if maximum index reached */
2261 if (index & 0x80000000) {
2262 if (index > env->cpuid_xlevel) {
2263 if (env->cpuid_xlevel2 > 0) {
2264 /* Handle the Centaur's CPUID instruction. */
2265 if (index > env->cpuid_xlevel2) {
2266 index = env->cpuid_xlevel2;
2267 } else if (index < 0xC0000000) {
2268 index = env->cpuid_xlevel;
2271 /* Intel documentation states that invalid EAX input will
2272 * return the same information as EAX=cpuid_level
2273 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) */
2275 index = env->cpuid_level;
2279 if (index > env->cpuid_level)
2280 index = env->cpuid_level;
2285 *eax = env->cpuid_level;
2286 *ebx = env->cpuid_vendor1;
2287 *edx = env->cpuid_vendor2;
2288 *ecx = env->cpuid_vendor3;
2291 *eax = env->cpuid_version;
2292 *ebx = (cpu->apic_id << 24) |
2293 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2294 *ecx = env->features[FEAT_1_ECX];
2295 *edx = env->features[FEAT_1_EDX];
2296 if (cs->nr_cores * cs->nr_threads > 1) {
2297 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2298 *edx |= 1 << 28; /* HTT bit */
2302 /* cache info: needed for Pentium Pro compatibility */
2303 if (cpu->cache_info_passthrough) {
2304 host_cpuid(index, 0, eax, ebx, ecx, edx);
2307 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2310 *edx = (L1D_DESCRIPTOR << 16) | \
2311 (L1I_DESCRIPTOR << 8) | \
2315 /* cache info: needed for Core compatibility */
2316 if (cpu->cache_info_passthrough) {
2317 host_cpuid(index, count, eax, ebx, ecx, edx);
2318 *eax &= ~0xFC000000;
2322 case 0: /* L1 dcache info */
2323 *eax |= CPUID_4_TYPE_DCACHE | \
2324 CPUID_4_LEVEL(1) | \
2325 CPUID_4_SELF_INIT_LEVEL;
2326 *ebx = (L1D_LINE_SIZE - 1) | \
2327 ((L1D_PARTITIONS - 1) << 12) | \
2328 ((L1D_ASSOCIATIVITY - 1) << 22);
2329 *ecx = L1D_SETS - 1;
2330 *edx = CPUID_4_NO_INVD_SHARING;
2332 case 1: /* L1 icache info */
2333 *eax |= CPUID_4_TYPE_ICACHE | \
2334 CPUID_4_LEVEL(1) | \
2335 CPUID_4_SELF_INIT_LEVEL;
2336 *ebx = (L1I_LINE_SIZE - 1) | \
2337 ((L1I_PARTITIONS - 1) << 12) | \
2338 ((L1I_ASSOCIATIVITY - 1) << 22);
2339 *ecx = L1I_SETS - 1;
2340 *edx = CPUID_4_NO_INVD_SHARING;
2342 case 2: /* L2 cache info */
2343 *eax |= CPUID_4_TYPE_UNIFIED | \
2344 CPUID_4_LEVEL(2) | \
2345 CPUID_4_SELF_INIT_LEVEL;
2346 if (cs->nr_threads > 1) {
2347 *eax |= (cs->nr_threads - 1) << 14;
2349 *ebx = (L2_LINE_SIZE - 1) | \
2350 ((L2_PARTITIONS - 1) << 12) | \
2351 ((L2_ASSOCIATIVITY - 1) << 22);
2353 *edx = CPUID_4_NO_INVD_SHARING;
2355 default: /* end of info */
2364 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2365 if ((*eax & 31) && cs->nr_cores > 1) {
2366 *eax |= (cs->nr_cores - 1) << 26;
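/* For reference, a guest decodes each leaf-4 sub-leaf filled above using
 * the standard Intel SDM layout (sketch only):
 *
 *   ways        = ((ebx >> 22) & 0x3ff) + 1;
 *   partitions  = ((ebx >> 12) & 0x3ff) + 1;
 *   line_size   =  (ebx & 0xfff) + 1;
 *   sets        =   ecx + 1;
 *   cache_bytes =   ways * partitions * line_size * sets;
 */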
2370 /* mwait info: needed for Core compatibility */
2371 *eax = 0; /* Smallest monitor-line size in bytes */
2372 *ebx = 0; /* Largest monitor-line size in bytes */
2373 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2377 /* Thermal and Power Leaf */
2378 *eax = env->features[FEAT_6_EAX];
2384 /* Structured Extended Feature Flags Enumeration Leaf */
2386 *eax = 0; /* Maximum ECX value for sub-leaves */
2387 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2388 *ecx = 0; /* Reserved */
2389 *edx = 0; /* Reserved */
2398 /* Direct Cache Access Information Leaf */
2399 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2405 /* Architectural Performance Monitoring Leaf */
2406 if (kvm_enabled() && cpu->enable_pmu) {
2407 KVMState *s = cs->kvm_state;
2409 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2410 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2411 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2412 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2421 KVMState *s = cs->kvm_state;
2425 /* Processor Extended State */
2430 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2434 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2435 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2439 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2440 const ExtSaveArea *esa = &ext_save_areas[i];
2441 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2442 (kvm_mask & (1 << i)) != 0) {
2446 *edx |= 1 << (i - 32);
2448 *ecx = MAX(*ecx, esa->offset + esa->size);
2451 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2453 } else if (count == 1) {
2454 *eax = env->features[FEAT_XSAVE];
2455 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2456 const ExtSaveArea *esa = &ext_save_areas[count];
2457 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2458 (kvm_mask & (1 << count)) != 0) {
2466 *eax = env->cpuid_xlevel;
2467 *ebx = env->cpuid_vendor1;
2468 *edx = env->cpuid_vendor2;
2469 *ecx = env->cpuid_vendor3;
2472 *eax = env->cpuid_version;
2474 *ecx = env->features[FEAT_8000_0001_ECX];
2475 *edx = env->features[FEAT_8000_0001_EDX];
2477 /* The Linux kernel checks for the CMPLegacy bit and
2478 * discards multiple thread information if it is set.
2479 * So don't set it here for Intel to make Linux guests happy. */
2481 if (cs->nr_cores * cs->nr_threads > 1) {
2482 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2483 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2484 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2485 *ecx |= 1 << 1; /* CmpLegacy bit */
2492 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2493 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2494 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2495 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2498 /* cache info (L1 cache) */
2499 if (cpu->cache_info_passthrough) {
2500 host_cpuid(index, 0, eax, ebx, ecx, edx);
2503 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2504 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2505 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2506 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2507 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2508 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2509 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2510 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
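/* CPUID[0x80000005] above uses AMD's fixed one-byte-per-field layout:
 * EAX/EBX pack TLB associativity and entry counts for 2M/4M and 4K pages,
 * while ECX/EDX pack L1 data/instruction cache size (KB), associativity,
 * lines per tag and line size.
 */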
2513 /* cache info (L2 cache) */
2514 if (cpu->cache_info_passthrough) {
2515 host_cpuid(index, 0, eax, ebx, ecx, edx);
2518 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2519 (L2_DTLB_2M_ENTRIES << 16) | \
2520 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2521 (L2_ITLB_2M_ENTRIES);
2522 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2523 (L2_DTLB_4K_ENTRIES << 16) | \
2524 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2525 (L2_ITLB_4K_ENTRIES);
2526 *ecx = (L2_SIZE_KB_AMD << 16) | \
2527 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2528 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2529 *edx = ((L3_SIZE_KB/512) << 18) | \
2530 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2531 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
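/* CPUID[0x80000006] above follows the AMD layout: ECX reports the L2 cache
 * (size in KB, encoded associativity, lines per tag, line size) and EDX the
 * L3 cache, with its size given in 512KB units.
 */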
2537 *edx = env->features[FEAT_8000_0007_EDX];
2540 /* virtual & phys address size in low 2 bytes. */
2541 /* XXX: This value must match the one used in the MMU code. */
2542 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2543 /* 64 bit processor */
2544 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2545 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2547 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2548 *eax = 0x00000024; /* 36 bits physical */
2550 *eax = 0x00000020; /* 32 bits physical */
2556 if (cs->nr_cores * cs->nr_threads > 1) {
2557 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2561 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2562 *eax = 0x00000001; /* SVM Revision */
2563 *ebx = 0x00000010; /* nr of ASIDs */
2565 *edx = env->features[FEAT_SVM]; /* optional features */
2574 *eax = env->cpuid_xlevel2;
2580 /* Support for VIA CPU's CPUID instruction */
2581 *eax = env->cpuid_version;
2584 *edx = env->features[FEAT_C000_0001_EDX];
2589 /* Reserved for future use; currently filled with zeroes */
2596 /* reserved values: zero */
2605 /* CPUClass::reset() */
2606 static void x86_cpu_reset(CPUState *s)
2608 X86CPU *cpu = X86_CPU(s);
2609 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2610 CPUX86State *env = &cpu->env;
2613 xcc->parent_reset(s);
2615 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2619 env->old_exception = -1;
2621 /* init to reset state */
2623 #ifdef CONFIG_SOFTMMU
2624 env->hflags |= HF_SOFTMMU_MASK;
2626 env->hflags2 |= HF2_GIF_MASK;
2628 cpu_x86_update_cr0(env, 0x60000010);
2629 env->a20_mask = ~0x0;
2630 env->smbase = 0x30000;
2632 env->idt.limit = 0xffff;
2633 env->gdt.limit = 0xffff;
2634 env->ldt.limit = 0xffff;
2635 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2636 env->tr.limit = 0xffff;
2637 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2639 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2640 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2641 DESC_R_MASK | DESC_A_MASK);
2642 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2643 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2645 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2646 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2648 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2649 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2651 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2652 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2654 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2655 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2659 env->regs[R_EDX] = env->cpuid_version;
2664 for (i = 0; i < 8; i++) {
2667 cpu_set_fpuc(env, 0x37f);
2669 env->mxcsr = 0x1f80;
2670 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2672 env->pat = 0x0007040600070406ULL;
2673 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2675 memset(env->dr, 0, sizeof(env->dr));
2676 env->dr[6] = DR6_FIXED_1;
2677 env->dr[7] = DR7_FIXED_1;
2678 cpu_breakpoint_remove_all(s, BP_CPU);
2679 cpu_watchpoint_remove_all(s, BP_CPU);
2684 /* SDM 11.11.5 requires:
2685 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2686 * - IA32_MTRR_PHYSMASKn.V = 0
2687 * All other bits are undefined. For simplification, zero it all. */
2689 env->mtrr_deftype = 0;
2690 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2691 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2693 #if !defined(CONFIG_USER_ONLY)
2694 /* We hard-wire the BSP to the first CPU. */
2695 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2697 s->halted = !cpu_is_bsp(cpu);
2699 if (kvm_enabled()) {
2700 kvm_arch_reset_vcpu(cpu);
2705 #ifndef CONFIG_USER_ONLY
2706 bool cpu_is_bsp(X86CPU *cpu)
2708 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2711 /* TODO: remove me, when reset over QOM tree is implemented */
2712 static void x86_cpu_machine_reset_cb(void *opaque)
2714 X86CPU *cpu = opaque;
2715 cpu_reset(CPU(cpu));
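/* Advertise architectural machine-check support: when the CPU model is
 * family 6 or newer and exposes both MCE and MCA, initialize MCG_CAP,
 * MCG_CTL and the per-bank control registers to their enabled defaults.
 */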
2719 static void mce_init(X86CPU *cpu)
2721 CPUX86State *cenv = &cpu->env;
2724 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2725 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2726 (CPUID_MCE | CPUID_MCA)) {
2727 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2728 cenv->mcg_ctl = ~(uint64_t)0;
2729 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2730 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2735 #ifndef CONFIG_USER_ONLY
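/* Create the local APIC device for this CPU, picking the implementation
 * that matches the accelerator: "kvm-apic" with an in-kernel irqchip,
 * "xen-apic" under Xen, and the emulated "apic" otherwise.
 */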
2736 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2738 APICCommonState *apic;
2739 const char *apic_type = "apic";
2741 if (kvm_irqchip_in_kernel()) {
2742 apic_type = "kvm-apic";
2743 } else if (xen_enabled()) {
2744 apic_type = "xen-apic";
2747 cpu->apic_state = DEVICE(object_new(apic_type));
2749 object_property_add_child(OBJECT(cpu), "apic",
2750 OBJECT(cpu->apic_state), NULL);
2751 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2752 /* TODO: convert to link<> */
2753 apic = APIC_COMMON(cpu->apic_state);
2755 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2758 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2760 APICCommonState *apic;
2761 static bool apic_mmio_map_once;
2763 if (cpu->apic_state == NULL) {
2766 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2769 /* Map APIC MMIO area */
2770 apic = APIC_COMMON(cpu->apic_state);
2771 if (!apic_mmio_map_once) {
2772 memory_region_add_subregion_overlap(get_system_memory(),
2774 MSR_IA32_APICBASE_BASE,
2777 apic_mmio_map_once = true;
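/* Machine-init-done hook: if the machine provides a /machine/smram region,
 * alias its first 4GiB into this CPU's address space, overlapping normal
 * memory at higher priority.  The alias starts out disabled.
 */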
2781 static void x86_cpu_machine_done(Notifier *n, void *unused)
2783 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2784 MemoryRegion *smram =
2785 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2788 cpu->smram = g_new(MemoryRegion, 1);
2789 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2790 smram, 0, 1ull << 32);
2791 memory_region_set_enabled(cpu->smram, false);
2792 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2796 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2802 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2803 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2804 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2805 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2806 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2807 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2808 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2810 CPUState *cs = CPU(dev);
2811 X86CPU *cpu = X86_CPU(dev);
2812 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2813 CPUX86State *env = &cpu->env;
2814 Error *local_err = NULL;
2815 static bool ht_warned;
2817 if (cpu->apic_id < 0) {
2818 error_setg(errp, "apic-id property was not initialized properly");
2822 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2823 env->cpuid_level = 7;
2826 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2829 if (IS_AMD_CPU(env)) {
2830 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2831 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2832 & CPUID_EXT2_AMD_ALIASES);
2836 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2837 error_setg(&local_err,
2839 "Host doesn't support requested features" :
2840 "TCG doesn't support requested features");
2844 #ifndef CONFIG_USER_ONLY
2845 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2847 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2848 x86_cpu_apic_create(cpu, &local_err);
2849 if (local_err != NULL) {
2857 #ifndef CONFIG_USER_ONLY
2858 if (tcg_enabled()) {
2859 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2860 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2861 cs->as = g_new(AddressSpace, 1);
2863 /* Outer container... */
2864 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2865 memory_region_set_enabled(cpu->cpu_as_root, true);
2867 /* ... with two regions inside: normal system memory with low priority, and... */
2870 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2871 get_system_memory(), 0, ~0ull);
2872 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2873 memory_region_set_enabled(cpu->cpu_as_mem, true);
2874 address_space_init(cs->as, cpu->cpu_as_root, "CPU");
2876 /* ... SMRAM with higher priority, linked from /machine/smram. */
2877 cpu->machine_done.notify = x86_cpu_machine_done;
2878 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2884 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2885 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2886 * based on inputs (sockets,cores,threads), it is still better to give users a warning.
2889 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2890 * cs->nr_threads hasn't been populated yet and the checking is incorrect. */
2892 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2893 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2894 " -smp options properly.");
2898 x86_cpu_apic_realize(cpu, &local_err);
2899 if (local_err != NULL) {
2904 xcc->parent_realize(dev, &local_err);
2907 if (local_err != NULL) {
2908 error_propagate(errp, local_err);
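/* BitProperty backs the per-feature-bit boolean QOM properties below: it
 * records a pointer to the feature word being controlled and the mask of
 * bits the property reads and writes.
 */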
2913 typedef struct BitProperty {
2918 static void x86_cpu_get_bit_prop(Object *obj,
2924 BitProperty *fp = opaque;
2925 bool value = (*fp->ptr & fp->mask) == fp->mask;
2926 visit_type_bool(v, &value, name, errp);
2929 static void x86_cpu_set_bit_prop(Object *obj,
2935 DeviceState *dev = DEVICE(obj);
2936 BitProperty *fp = opaque;
2937 Error *local_err = NULL;
2940 if (dev->realized) {
2941 qdev_prop_set_after_realize(dev, name, errp);
2945 visit_type_bool(v, &value, name, &local_err);
2947 error_propagate(errp, local_err);
2952 *fp->ptr |= fp->mask;
2954 *fp->ptr &= ~fp->mask;
2958 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
2961 BitProperty *prop = opaque;
2965 /* Register a boolean property to get/set a single bit in a uint32_t field.
2967 * The same property name can be registered multiple times to make it affect
2968 * multiple bits in the same FeatureWord. In that case, the getter will return
2969 * true only if all bits are set. */
2971 static void x86_cpu_register_bit_prop(X86CPU *cpu,
2972 const char *prop_name,
2978 uint32_t mask = (1UL << bitnr);
2980 op = object_property_find(OBJECT(cpu), prop_name, NULL);
2983 assert(fp->ptr == field);
2986 fp = g_new0(BitProperty, 1);
2989 object_property_add(OBJECT(cpu), prop_name, "bool",
2990 x86_cpu_get_bit_prop,
2991 x86_cpu_set_bit_prop,
2992 x86_cpu_release_bit_prop, fp, &error_abort);
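/* Register QOM properties for every named bit of feature word 'w'.  Names
 * containing '|' list aliases: the first name gets the real property and
 * the remaining names are registered as aliases of it.
 */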
2996 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3000 Object *obj = OBJECT(cpu);
3003 FeatureWordInfo *fi = &feature_word_info[w];
3005 if (!fi->feat_names) {
3008 if (!fi->feat_names[bitnr]) {
3012 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3014 feat2prop(names[0]);
3015 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3017 for (i = 1; names[i]; i++) {
3018 feat2prop(names[i]);
3019 object_property_add_alias(obj, names[i], obj, names[0],
3026 static void x86_cpu_initfn(Object *obj)
3028 CPUState *cs = CPU(obj);
3029 X86CPU *cpu = X86_CPU(obj);
3030 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3031 CPUX86State *env = &cpu->env;
3036 cpu_exec_init(cs, &error_abort);
3038 object_property_add(obj, "family", "int",
3039 x86_cpuid_version_get_family,
3040 x86_cpuid_version_set_family, NULL, NULL, NULL);
3041 object_property_add(obj, "model", "int",
3042 x86_cpuid_version_get_model,
3043 x86_cpuid_version_set_model, NULL, NULL, NULL);
3044 object_property_add(obj, "stepping", "int",
3045 x86_cpuid_version_get_stepping,
3046 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3047 object_property_add_str(obj, "vendor",
3048 x86_cpuid_get_vendor,
3049 x86_cpuid_set_vendor, NULL);
3050 object_property_add_str(obj, "model-id",
3051 x86_cpuid_get_model_id,
3052 x86_cpuid_set_model_id, NULL);
3053 object_property_add(obj, "tsc-frequency", "int",
3054 x86_cpuid_get_tsc_freq,
3055 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3056 object_property_add(obj, "apic-id", "int",
3057 x86_cpuid_get_apic_id,
3058 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3059 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3060 x86_cpu_get_feature_words,
3061 NULL, NULL, (void *)env->features, NULL);
3062 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3063 x86_cpu_get_feature_words,
3064 NULL, NULL, (void *)cpu->filtered_features, NULL);
3066 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3068 #ifndef CONFIG_USER_ONLY
3069 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3073 for (w = 0; w < FEATURE_WORDS; w++) {
3076 for (bitnr = 0; bitnr < 32; bitnr++) {
3077 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3081 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3083 /* init various static tables used in TCG mode */
3084 if (tcg_enabled() && !inited) {
3086 optimize_flags_init();
3090 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3092 X86CPU *cpu = X86_CPU(cs);
3094 return cpu->apic_id;
3097 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3099 X86CPU *cpu = X86_CPU(cs);
3101 return cpu->env.cr[0] & CR0_PG_MASK;
3104 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3106 X86CPU *cpu = X86_CPU(cs);
3108 cpu->env.eip = value;
3111 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3113 X86CPU *cpu = X86_CPU(cs);
3115 cpu->env.eip = tb->pc - tb->cs_base;
3118 static bool x86_cpu_has_work(CPUState *cs)
3120 X86CPU *cpu = X86_CPU(cs);
3121 CPUX86State *env = &cpu->env;
3123 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3124 CPU_INTERRUPT_POLL)) &&
3125 (env->eflags & IF_MASK)) ||
3126 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3127 CPU_INTERRUPT_INIT |
3128 CPU_INTERRUPT_SIPI |
3129 CPU_INTERRUPT_MCE)) ||
3130 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3131 !(env->hflags & HF_SMM_MASK));
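/* These qdev properties surface as -cpu options on the command line, e.g.
 * "-cpu Nehalem,enforce,hv-relaxed,hv-spinlocks=0x1fff" (illustrative
 * values only).
 */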
3134 static Property x86_cpu_properties[] = {
3135 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3136 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3137 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3138 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3139 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3140 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3141 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3142 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3143 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3144 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3145 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3146 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3147 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3148 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3149 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3150 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3151 DEFINE_PROP_END_OF_LIST()
3154 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3156 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3157 CPUClass *cc = CPU_CLASS(oc);
3158 DeviceClass *dc = DEVICE_CLASS(oc);
3160 xcc->parent_realize = dc->realize;
3161 dc->realize = x86_cpu_realizefn;
3162 dc->props = x86_cpu_properties;
3164 xcc->parent_reset = cc->reset;
3165 cc->reset = x86_cpu_reset;
3166 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3168 cc->class_by_name = x86_cpu_class_by_name;
3169 cc->parse_features = x86_cpu_parse_featurestr;
3170 cc->has_work = x86_cpu_has_work;
3171 cc->do_interrupt = x86_cpu_do_interrupt;
3172 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3173 cc->dump_state = x86_cpu_dump_state;
3174 cc->set_pc = x86_cpu_set_pc;
3175 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3176 cc->gdb_read_register = x86_cpu_gdb_read_register;
3177 cc->gdb_write_register = x86_cpu_gdb_write_register;
3178 cc->get_arch_id = x86_cpu_get_arch_id;
3179 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3180 #ifdef CONFIG_USER_ONLY
3181 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3183 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3184 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3185 cc->write_elf64_note = x86_cpu_write_elf64_note;
3186 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3187 cc->write_elf32_note = x86_cpu_write_elf32_note;
3188 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3189 cc->vmsd = &vmstate_x86_cpu;
3191 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3192 #ifndef CONFIG_USER_ONLY
3193 cc->debug_excp_handler = breakpoint_handler;
3195 cc->cpu_exec_enter = x86_cpu_exec_enter;
3196 cc->cpu_exec_exit = x86_cpu_exec_exit;
3199 /* Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3200 * object in cpus -> dangling pointer after final object_unref(). */
3202 dc->cannot_destroy_with_object_finalize_yet = true;
3205 static const TypeInfo x86_cpu_type_info = {
3206 .name = TYPE_X86_CPU,
3208 .instance_size = sizeof(X86CPU),
3209 .instance_init = x86_cpu_initfn,
3211 .class_size = sizeof(X86CPUClass),
3212 .class_init = x86_cpu_common_class_init,
3215 static void x86_cpu_register_types(void)
3219 type_register_static(&x86_cpu_type_info);
3220 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3221 x86_register_cpudef_type(&builtin_x86_defs[i]);
3224 type_register_static(&host_x86_cpu_type_info);
3228 type_init(x86_cpu_register_types)