2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
27 #include "qemu-option.h"
28 #include "qemu-config.h"
30 #include "qapi/qapi-visit-core.h"
31 #include "arch_init.h"
36 #if defined(CONFIG_KVM)
37 #include <linux/kvm_para.h>
40 /* feature flags taken from "Intel Processor Identification and the CPUID
41 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
42 * between feature naming conventions, aliases may be added.
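/* Illustrative note: entries in the tables below may join alternative
 * spellings with '|' (e.g. "sse4.1|sse4_1" in ext_feature_name[]), so a user
 * may request either "+sse4.1" or "+sse4_1" on the -cpu command line and
 * altcmp() below accepts both for the same CPUID bit. */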
44 static const char *feature_name[] = {
45 "fpu", "vme", "de", "pse",
46 "tsc", "msr", "pae", "mce",
47 "cx8", "apic", NULL, "sep",
48 "mtrr", "pge", "mca", "cmov",
49 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
50 NULL, "ds" /* Intel dts */, "acpi", "mmx",
51 "fxsr", "sse", "sse2", "ss",
52 "ht" /* Intel htt */, "tm", "ia64", "pbe",
54 static const char *ext_feature_name[] = {
55 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
56 "ds_cpl", "vmx", "smx", "est",
57 "tm2", "ssse3", "cid", NULL,
58 "fma", "cx16", "xtpr", "pdcm",
59 NULL, "pcid", "dca", "sse4.1|sse4_1",
60 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
61 "tsc-deadline", "aes", "xsave", "osxsave",
62 "avx", NULL, NULL, "hypervisor",
64 /* Feature names that are already defined in feature_name[] but are set in
65  * CPUID[8000_0001].EDX on AMD CPUs don't have their names in
66  * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
67  * if and only if the CPU vendor is AMD.
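/* For example, "mmx" only appears in feature_name[]; when the guest vendor is
 * AMD, cpu_x86_register() below copies the CPUID_EXT2_AMD_ALIASES bits from
 * cpuid_features into cpuid_ext2_features, so the flag is reported in
 * CPUID[8000_0001].EDX as well. */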
69 static const char *ext2_feature_name[] = {
70 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
71 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
72 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
73 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
74 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
75 "nx|xd", NULL, "mmxext", NULL /* mmx */,
76 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
77 NULL, "lm|i64", "3dnowext", "3dnow",
79 static const char *ext3_feature_name[] = {
80 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
81 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
82 "3dnowprefetch", "osvw", "ibs", "xop",
83 "skinit", "wdt", NULL, NULL,
84 "fma4", NULL, "cvt16", "nodeid_msr",
85 NULL, NULL, NULL, NULL,
86 NULL, NULL, NULL, NULL,
87 NULL, NULL, NULL, NULL,
90 static const char *kvm_feature_name[] = {
91 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock", "kvm_asyncpf", NULL, "kvm_pv_eoi", NULL,
92 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
93 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
94 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
97 static const char *svm_feature_name[] = {
98 "npt", "lbrv", "svm_lock", "nrip_save",
99 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
100 NULL, NULL, "pause_filter", NULL,
101 "pfthreshold", NULL, NULL, NULL,
102 NULL, NULL, NULL, NULL,
103 NULL, NULL, NULL, NULL,
104 NULL, NULL, NULL, NULL,
105 NULL, NULL, NULL, NULL,
108 static const char *cpuid_7_0_ebx_feature_name[] = {
109 NULL, NULL, NULL, NULL, NULL, NULL, NULL, "smep",
110 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
111 NULL, NULL, NULL, NULL, "smap", NULL, NULL, NULL,
112 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
115 /* collects per-function cpuid data
117 typedef struct model_features_t {
118 uint32_t *guest_feat;
121 const char **flag_names;
126 int enforce_cpuid = 0;
128 void host_cpuid(uint32_t function, uint32_t count,
129 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
131 #if defined(CONFIG_KVM)
136 : "=a"(vec[0]), "=b"(vec[1]),
137 "=c"(vec[2]), "=d"(vec[3])
138 : "0"(function), "c"(count) : "cc");
140 asm volatile("pusha \n\t"
142 "mov %%eax, 0(%2) \n\t"
143 "mov %%ebx, 4(%2) \n\t"
144 "mov %%ecx, 8(%2) \n\t"
145 "mov %%edx, 12(%2) \n\t"
147 : : "a"(function), "c"(count), "S"(vec)
162 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
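/* Note: despite its name, iswhite() is true for any non-NUL byte that is a
 * space, a control character, or above '~' in ASCII; it is false for NUL and
 * for the printable range '!'..'~'. */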
164 /* general substring compare of *[s1..e1) and *[s2..e2). sx is the start of
165  * a substring. ex, if not NULL, points to the first char after the substring;
166  * otherwise the string is assumed to be sized by a terminating nul.
167  * Return lexical ordering of *s1:*s2.
169 static int sstrcmp(const char *s1, const char *e1, const char *s2,
173 if (!*s1 || !*s2 || *s1 != *s2)
176 if (s1 == e1 && s2 == e2)
185 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
186  * '|'-delimited (possibly empty) strings, in which case the search for a
187  * match within the alternatives proceeds left to right. Return 0 for
188  * success, non-zero otherwise.
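/* Illustrative examples (not from the original source):
 *   altcmp("sse4_1", NULL, "sse4.1|sse4_1") == 0   (second alternative hits)
 *   altcmp("sse4",   NULL, "sse4.1|sse4_1") != 0   (no alternative matches)
 */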
190 static int altcmp(const char *s, const char *e, const char *altstr)
194 for (q = p = altstr; ; ) {
195 while (*p && *p != '|')
197 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
206 /* search featureset for flag *[s..e), if found set corresponding bit in
207 * *pval and return true, otherwise return false
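/* For example, lookup_feature(&ecx_feats, "popcnt", NULL, ext_feature_name)
 * (ecx_feats being a hypothetical uint32_t accumulator) matches entry 23 of
 * the table, sets bit 23 (CPUID_EXT_POPCNT) in ecx_feats and returns true;
 * an unknown name leaves *pval untouched and returns false. */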
209 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
210 const char **featureset)
216 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
217 if (*ppc && !altcmp(s, e, *ppc)) {
225 static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
226 uint32_t *ext_features,
227 uint32_t *ext2_features,
228 uint32_t *ext3_features,
229 uint32_t *kvm_features,
230 uint32_t *svm_features,
231 uint32_t *cpuid_7_0_ebx_features)
233 if (!lookup_feature(features, flagname, NULL, feature_name) &&
234 !lookup_feature(ext_features, flagname, NULL, ext_feature_name) &&
235 !lookup_feature(ext2_features, flagname, NULL, ext2_feature_name) &&
236 !lookup_feature(ext3_features, flagname, NULL, ext3_feature_name) &&
237 !lookup_feature(kvm_features, flagname, NULL, kvm_feature_name) &&
238 !lookup_feature(svm_features, flagname, NULL, svm_feature_name) &&
239 !lookup_feature(cpuid_7_0_ebx_features, flagname, NULL,
240 cpuid_7_0_ebx_feature_name))
241 fprintf(stderr, "CPU feature %s not found\n", flagname);
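/* Usage sketch: this is called once per "+flag"/"-flag" from the -cpu string;
 * e.g. add_flagname_to_bitmaps("aes", ...) finds "aes" in ext_feature_name[]
 * and sets CPUID_EXT_AES (bit 25) in *ext_features only, while a name found
 * in none of the tables just triggers the warning above. */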
244 typedef struct x86_def_t {
245 struct x86_def_t *next;
248 uint32_t vendor1, vendor2, vendor3;
253 uint32_t features, ext_features, ext2_features, ext3_features;
254 uint32_t kvm_features, svm_features;
258 /* Store the results of Centaur's CPUID instructions */
259 uint32_t ext4_features;
261 /* The feature bits on CPUID[EAX=7,ECX=0].EBX */
262 uint32_t cpuid_7_0_ebx_features;
265 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
266 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
267 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
268 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
269 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
270 CPUID_PSE36 | CPUID_FXSR)
271 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
272 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
273 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
274 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
275 CPUID_PAE | CPUID_SEP | CPUID_APIC)
277 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
278 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
279 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
280 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
281 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
282 /* partly implemented:
283 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64)
284 CPUID_PSE36 (needed for Solaris) */
286 /* missing: CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
287 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | \
288 CPUID_EXT_CX16 | CPUID_EXT_POPCNT | \
289 CPUID_EXT_HYPERVISOR)
291 /* missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
292 CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_XSAVE */
293 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
294 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
295 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT)
297 /* missing: CPUID_EXT2_PDPE1GB */
298 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
299 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
300 #define TCG_SVM_FEATURES 0
301 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP)
303 /* maintains list of cpu model definitions
305 static x86_def_t *x86_defs = {NULL};
307 /* built-in cpu model definitions (deprecated)
309 static x86_def_t builtin_x86_defs[] = {
313 .vendor1 = CPUID_VENDOR_AMD_1,
314 .vendor2 = CPUID_VENDOR_AMD_2,
315 .vendor3 = CPUID_VENDOR_AMD_3,
319 .features = PPRO_FEATURES |
320 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
322 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
323 .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
324 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
325 .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
326 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
327 .xlevel = 0x8000000A,
332 .vendor1 = CPUID_VENDOR_AMD_1,
333 .vendor2 = CPUID_VENDOR_AMD_2,
334 .vendor3 = CPUID_VENDOR_AMD_3,
338 .features = PPRO_FEATURES |
339 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
340 CPUID_PSE36 | CPUID_VME | CPUID_HT,
341 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
343 .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
344 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
345 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
346 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
347 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
349 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
350 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
351 .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
352 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
353 .svm_features = CPUID_SVM_NPT | CPUID_SVM_LBRV,
354 .xlevel = 0x8000001A,
355 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
363 .features = PPRO_FEATURES |
364 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
365 CPUID_PSE36 | CPUID_VME | CPUID_DTS | CPUID_ACPI | CPUID_SS |
366 CPUID_HT | CPUID_TM | CPUID_PBE,
367 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
368 CPUID_EXT_DTES64 | CPUID_EXT_DSCPL | CPUID_EXT_VMX | CPUID_EXT_EST |
369 CPUID_EXT_TM2 | CPUID_EXT_CX16 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
370 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
371 .ext3_features = CPUID_EXT3_LAHF_LM,
372 .xlevel = 0x80000008,
373 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
378 .vendor1 = CPUID_VENDOR_INTEL_1,
379 .vendor2 = CPUID_VENDOR_INTEL_2,
380 .vendor3 = CPUID_VENDOR_INTEL_3,
384 /* Missing: CPUID_VME, CPUID_HT */
385 .features = PPRO_FEATURES |
386 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
388 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
389 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
390 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
391 .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
392 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
393 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
394 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
395 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
396 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
398 .xlevel = 0x80000008,
399 .model_id = "Common KVM processor"
407 .features = PPRO_FEATURES,
408 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
409 .xlevel = 0x80000004,
417 .features = PPRO_FEATURES |
418 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
419 .ext_features = CPUID_EXT_SSE3,
420 .ext2_features = PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
422 .xlevel = 0x80000008,
423 .model_id = "Common 32-bit KVM processor"
431 .features = PPRO_FEATURES | CPUID_VME |
432 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_DTS | CPUID_ACPI |
433 CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
434 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_VMX |
435 CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
436 .ext2_features = CPUID_EXT2_NX,
437 .xlevel = 0x80000008,
438 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
446 .features = I486_FEATURES,
455 .features = PENTIUM_FEATURES,
464 .features = PENTIUM2_FEATURES,
473 .features = PENTIUM3_FEATURES,
479 .vendor1 = CPUID_VENDOR_AMD_1,
480 .vendor2 = CPUID_VENDOR_AMD_2,
481 .vendor3 = CPUID_VENDOR_AMD_3,
485 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
487 .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
488 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
489 .xlevel = 0x80000008,
493 /* the original CPU reports CPUID level 10 */
498 .features = PPRO_FEATURES |
499 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | CPUID_DTS |
500 CPUID_ACPI | CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
501 /* Some CPUs lack CPUID_SEP */
502 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
503 CPUID_EXT_DSCPL | CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR,
504 .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
506 .ext3_features = CPUID_EXT3_LAHF_LM,
507 .xlevel = 0x8000000A,
508 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
513 .vendor1 = CPUID_VENDOR_INTEL_1,
514 .vendor2 = CPUID_VENDOR_INTEL_2,
515 .vendor3 = CPUID_VENDOR_INTEL_3,
519 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
520 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
521 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
522 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
523 CPUID_DE | CPUID_FP87,
524 .ext_features = CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
525 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
526 .ext3_features = CPUID_EXT3_LAHF_LM,
527 .xlevel = 0x8000000A,
528 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
533 .vendor1 = CPUID_VENDOR_INTEL_1,
534 .vendor2 = CPUID_VENDOR_INTEL_2,
535 .vendor3 = CPUID_VENDOR_INTEL_3,
539 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
540 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
541 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
542 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
543 CPUID_DE | CPUID_FP87,
544 .ext_features = CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
546 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
547 .ext3_features = CPUID_EXT3_LAHF_LM,
548 .xlevel = 0x8000000A,
549 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
554 .vendor1 = CPUID_VENDOR_INTEL_1,
555 .vendor2 = CPUID_VENDOR_INTEL_2,
556 .vendor3 = CPUID_VENDOR_INTEL_3,
560 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
561 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
562 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
563 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
564 CPUID_DE | CPUID_FP87,
565 .ext_features = CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
566 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
567 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
568 .ext3_features = CPUID_EXT3_LAHF_LM,
569 .xlevel = 0x8000000A,
570 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
575 .vendor1 = CPUID_VENDOR_INTEL_1,
576 .vendor2 = CPUID_VENDOR_INTEL_2,
577 .vendor3 = CPUID_VENDOR_INTEL_3,
581 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
582 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
583 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
584 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
585 CPUID_DE | CPUID_FP87,
586 .ext_features = CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
587 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
589 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
590 .ext3_features = CPUID_EXT3_LAHF_LM,
591 .xlevel = 0x8000000A,
592 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
595 .name = "SandyBridge",
597 .vendor1 = CPUID_VENDOR_INTEL_1,
598 .vendor2 = CPUID_VENDOR_INTEL_2,
599 .vendor3 = CPUID_VENDOR_INTEL_3,
603 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
604 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
605 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
606 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
607 CPUID_DE | CPUID_FP87,
608 .ext_features = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
609 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
610 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
611 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
613 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
615 .ext3_features = CPUID_EXT3_LAHF_LM,
616 .xlevel = 0x8000000A,
617 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
620 .name = "Opteron_G1",
622 .vendor1 = CPUID_VENDOR_AMD_1,
623 .vendor2 = CPUID_VENDOR_AMD_2,
624 .vendor3 = CPUID_VENDOR_AMD_3,
628 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
629 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
630 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
631 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
632 CPUID_DE | CPUID_FP87,
633 .ext_features = CPUID_EXT_SSE3,
634 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
635 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
636 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
637 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
638 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
639 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
640 .xlevel = 0x80000008,
641 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
644 .name = "Opteron_G2",
646 .vendor1 = CPUID_VENDOR_AMD_1,
647 .vendor2 = CPUID_VENDOR_AMD_2,
648 .vendor3 = CPUID_VENDOR_AMD_3,
652 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
653 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
654 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
655 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
656 CPUID_DE | CPUID_FP87,
657 .ext_features = CPUID_EXT_CX16 | CPUID_EXT_SSE3,
658 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
659 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
660 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
661 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
662 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
663 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
664 CPUID_EXT2_DE | CPUID_EXT2_FPU,
665 .ext3_features = CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
666 .xlevel = 0x80000008,
667 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
670 .name = "Opteron_G3",
672 .vendor1 = CPUID_VENDOR_AMD_1,
673 .vendor2 = CPUID_VENDOR_AMD_2,
674 .vendor3 = CPUID_VENDOR_AMD_3,
678 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
679 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
680 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
681 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
682 CPUID_DE | CPUID_FP87,
683 .ext_features = CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
685 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
686 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
687 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
688 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
689 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
690 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
691 CPUID_EXT2_DE | CPUID_EXT2_FPU,
692 .ext3_features = CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
693 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
694 .xlevel = 0x80000008,
695 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
698 .name = "Opteron_G4",
700 .vendor1 = CPUID_VENDOR_AMD_1,
701 .vendor2 = CPUID_VENDOR_AMD_2,
702 .vendor3 = CPUID_VENDOR_AMD_3,
706 .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
707 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
708 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
709 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
710 CPUID_DE | CPUID_FP87,
711 .ext_features = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
712 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
713 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
715 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
716 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
717 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
718 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
719 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
720 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
721 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
722 .ext3_features = CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
723 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
724 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
726 .xlevel = 0x8000001A,
727 .model_id = "AMD Opteron 62xx class CPU",
731 static int cpu_x86_fill_model_id(char *str)
733 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
736 for (i = 0; i < 3; i++) {
737 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
738 memcpy(str + i * 16 + 0, &eax, 4);
739 memcpy(str + i * 16 + 4, &ebx, 4);
740 memcpy(str + i * 16 + 8, &ecx, 4);
741 memcpy(str + i * 16 + 12, &edx, 4);
746 static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
748 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
750 x86_cpu_def->name = "host";
751 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
752 x86_cpu_def->level = eax;
753 x86_cpu_def->vendor1 = ebx;
754 x86_cpu_def->vendor2 = edx;
755 x86_cpu_def->vendor3 = ecx;
757 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
758 x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
759 x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
760 x86_cpu_def->stepping = eax & 0x0F;
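/* CPUID[1].EAX layout: stepping in bits 3..0, model in 7..4, family in 11..8,
 * extended model in 19..16, extended family in 27..20.  Worked example:
 * eax == 0x000206A7 decodes to family 6, model 0x2A (42), stepping 7. */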
761 x86_cpu_def->ext_features = ecx;
762 x86_cpu_def->features = edx;
764 if (kvm_enabled() && x86_cpu_def->level >= 7) {
765 x86_cpu_def->cpuid_7_0_ebx_features = kvm_arch_get_supported_cpuid(kvm_state, 0x7, 0, R_EBX);
767 x86_cpu_def->cpuid_7_0_ebx_features = 0;
770 host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
771 x86_cpu_def->xlevel = eax;
773 host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
774 x86_cpu_def->ext2_features = edx;
775 x86_cpu_def->ext3_features = ecx;
776 cpu_x86_fill_model_id(x86_cpu_def->model_id);
777 x86_cpu_def->vendor_override = 0;
779 /* Call Centaur's CPUID instruction. */
780 if (x86_cpu_def->vendor1 == CPUID_VENDOR_VIA_1 &&
781 x86_cpu_def->vendor2 == CPUID_VENDOR_VIA_2 &&
782 x86_cpu_def->vendor3 == CPUID_VENDOR_VIA_3) {
783 host_cpuid(0xC0000000, 0, &eax, &ebx, &ecx, &edx);
784 if (eax >= 0xC0000001) {
785 /* Support VIA max extended level */
786 x86_cpu_def->xlevel2 = eax;
787 host_cpuid(0xC0000001, 0, &eax, &ebx, &ecx, &edx);
788 x86_cpu_def->ext4_features = edx;
793 * Every SVM feature requires emulation support in KVM - so we can't just
794 * read the host features here. KVM might even support SVM features not
795 * available on the host hardware. Just set all bits and mask out the
796 * unsupported ones later.
798 x86_cpu_def->svm_features = -1;
803 static int unavailable_host_feature(struct model_features_t *f, uint32_t mask)
807 for (i = 0; i < 32; ++i)
809 fprintf(stderr, "warning: host cpuid %04x_%04x lacks requested"
810 " flag '%s' [0x%08x]\n",
811 f->cpuid >> 16, f->cpuid & 0xffff,
812 f->flag_names[i] ? f->flag_names[i] : "[reserved]", mask);
818 /* best-effort attempt to inform the user that requested cpu flags aren't
819  * making their way to the guest. Note: ft[].check_feat should ideally be
820  * specified via a guest_def field to suppress reporting of extraneous flags.
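/* Example of the resulting diagnostic (hypothetical host): requesting a model
 * with "avx" (CPUID[1].ECX bit 28) on a host without it makes
 * unavailable_host_feature() print roughly:
 *   warning: host cpuid 0000_0001 lacks requested flag 'avx' [0x10000000] */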
822 static int check_features_against_host(x86_def_t *guest_def)
827 struct model_features_t ft[] = {
828 {&guest_def->features, &host_def.features,
829 ~0, feature_name, 0x00000000},
830 {&guest_def->ext_features, &host_def.ext_features,
831 ~CPUID_EXT_HYPERVISOR, ext_feature_name, 0x00000001},
832 {&guest_def->ext2_features, &host_def.ext2_features,
833 ~PPRO_FEATURES, ext2_feature_name, 0x80000000},
834 {&guest_def->ext3_features, &host_def.ext3_features,
835 ~CPUID_EXT3_SVM, ext3_feature_name, 0x80000001}};
837 cpu_x86_fill_host(&host_def);
838 for (rv = 0, i = 0; i < ARRAY_SIZE(ft); ++i)
839 for (mask = 1; mask; mask <<= 1)
840 if (ft[i].check_feat & mask && *ft[i].guest_feat & mask &&
841 !(*ft[i].host_feat & mask)) {
842 unavailable_host_feature(&ft[i], mask);
848 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
849 const char *name, Error **errp)
851 X86CPU *cpu = X86_CPU(obj);
852 CPUX86State *env = &cpu->env;
855 value = (env->cpuid_version >> 8) & 0xf;
857 value += (env->cpuid_version >> 20) & 0xff;
859 visit_type_int(v, &value, name, errp);
862 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
863 const char *name, Error **errp)
865 X86CPU *cpu = X86_CPU(obj);
866 CPUX86State *env = &cpu->env;
867 const int64_t min = 0;
868 const int64_t max = 0xff + 0xf;
871 visit_type_int(v, &value, name, errp);
872 if (error_is_set(errp)) {
875 if (value < min || value > max) {
876 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
877 name ? name : "null", value, min, max);
881 env->cpuid_version &= ~0xff00f00;
883 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
885 env->cpuid_version |= value << 8;
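/* Encoding note: family values up to 0xf go straight into cpuid_version bits
 * 11..8; larger values store 0xf there and the remainder (value - 0xf) in the
 * extended-family field, bits 27..20, mirroring the decode in
 * x86_cpuid_version_get_family() above. */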
889 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
890 const char *name, Error **errp)
892 X86CPU *cpu = X86_CPU(obj);
893 CPUX86State *env = &cpu->env;
896 value = (env->cpuid_version >> 4) & 0xf;
897 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
898 visit_type_int(v, &value, name, errp);
901 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
902 const char *name, Error **errp)
904 X86CPU *cpu = X86_CPU(obj);
905 CPUX86State *env = &cpu->env;
906 const int64_t min = 0;
907 const int64_t max = 0xff;
910 visit_type_int(v, &value, name, errp);
911 if (error_is_set(errp)) {
914 if (value < min || value > max) {
915 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
916 name ? name : "null", value, min, max);
920 env->cpuid_version &= ~0xf00f0;
921 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
924 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
925 void *opaque, const char *name,
928 X86CPU *cpu = X86_CPU(obj);
929 CPUX86State *env = &cpu->env;
932 value = env->cpuid_version & 0xf;
933 visit_type_int(v, &value, name, errp);
936 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
937 void *opaque, const char *name,
940 X86CPU *cpu = X86_CPU(obj);
941 CPUX86State *env = &cpu->env;
942 const int64_t min = 0;
943 const int64_t max = 0xf;
946 visit_type_int(v, &value, name, errp);
947 if (error_is_set(errp)) {
950 if (value < min || value > max) {
951 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
952 name ? name : "null", value, min, max);
956 env->cpuid_version &= ~0xf;
957 env->cpuid_version |= value & 0xf;
960 static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
961 const char *name, Error **errp)
963 X86CPU *cpu = X86_CPU(obj);
965 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
968 static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
969 const char *name, Error **errp)
971 X86CPU *cpu = X86_CPU(obj);
973 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
976 static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
977 const char *name, Error **errp)
979 X86CPU *cpu = X86_CPU(obj);
981 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
984 static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
985 const char *name, Error **errp)
987 X86CPU *cpu = X86_CPU(obj);
989 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
992 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
994 X86CPU *cpu = X86_CPU(obj);
995 CPUX86State *env = &cpu->env;
999 value = (char *)g_malloc(12 + 1);
1000 for (i = 0; i < 4; i++) {
1001 value[i ] = env->cpuid_vendor1 >> (8 * i);
1002 value[i + 4] = env->cpuid_vendor2 >> (8 * i);
1003 value[i + 8] = env->cpuid_vendor3 >> (8 * i);
1009 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1012 X86CPU *cpu = X86_CPU(obj);
1013 CPUX86State *env = &cpu->env;
1016 if (strlen(value) != 12) {
1017 error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
1022 env->cpuid_vendor1 = 0;
1023 env->cpuid_vendor2 = 0;
1024 env->cpuid_vendor3 = 0;
1025 for (i = 0; i < 4; i++) {
1026 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1027 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1028 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1030 env->cpuid_vendor_override = 1;
1033 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1035 X86CPU *cpu = X86_CPU(obj);
1036 CPUX86State *env = &cpu->env;
1040 value = g_malloc(48 + 1);
1041 for (i = 0; i < 48; i++) {
1042 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1048 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1051 X86CPU *cpu = X86_CPU(obj);
1052 CPUX86State *env = &cpu->env;
1055 if (model_id == NULL) {
1058 len = strlen(model_id);
1059 memset(env->cpuid_model, 0, 48);
1060 for (i = 0; i < 48; i++) {
1064 c = (uint8_t)model_id[i];
1066 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1070 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1071 const char *name, Error **errp)
1073 X86CPU *cpu = X86_CPU(obj);
1076 value = cpu->env.tsc_khz * 1000;
1077 visit_type_int(v, &value, name, errp);
1080 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1081 const char *name, Error **errp)
1083 X86CPU *cpu = X86_CPU(obj);
1084 const int64_t min = 0;
1085 const int64_t max = INT64_MAX;
1088 visit_type_int(v, &value, name, errp);
1089 if (error_is_set(errp)) {
1092 if (value < min || value > max) {
1093 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1094 name ? name : "null", value, min, max);
1098 cpu->env.tsc_khz = value / 1000;
1101 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
1106 char *s = g_strdup(cpu_model);
1107 char *featurestr, *name = strtok(s, ",");
1108 /* Features to be added */
1109 uint32_t plus_features = 0, plus_ext_features = 0;
1110 uint32_t plus_ext2_features = 0, plus_ext3_features = 0;
1111 uint32_t plus_kvm_features = 0, plus_svm_features = 0;
1112 uint32_t plus_7_0_ebx_features = 0;
1113 /* Features to be removed */
1114 uint32_t minus_features = 0, minus_ext_features = 0;
1115 uint32_t minus_ext2_features = 0, minus_ext3_features = 0;
1116 uint32_t minus_kvm_features = 0, minus_svm_features = 0;
1117 uint32_t minus_7_0_ebx_features = 0;
1120 for (def = x86_defs; def; def = def->next)
1121 if (name && !strcmp(name, def->name))
1123 if (kvm_enabled() && name && strcmp(name, "host") == 0) {
1124 cpu_x86_fill_host(x86_cpu_def);
1128 memcpy(x86_cpu_def, def, sizeof(*def));
1131 #if defined(CONFIG_KVM)
1132 plus_kvm_features = (1 << KVM_FEATURE_CLOCKSOURCE) |
1133 (1 << KVM_FEATURE_NOP_IO_DELAY) |
1134 (1 << KVM_FEATURE_MMU_OP) |
1135 (1 << KVM_FEATURE_CLOCKSOURCE2) |
1136 (1 << KVM_FEATURE_ASYNC_PF) |
1137 (1 << KVM_FEATURE_STEAL_TIME) |
1138 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
1140 plus_kvm_features = 0;
1143 add_flagname_to_bitmaps("hypervisor", &plus_features,
1144 &plus_ext_features, &plus_ext2_features, &plus_ext3_features,
1145 &plus_kvm_features, &plus_svm_features, &plus_7_0_ebx_features);
1147 featurestr = strtok(NULL, ",");
1149 while (featurestr) {
1151 if (featurestr[0] == '+') {
1152 add_flagname_to_bitmaps(featurestr + 1, &plus_features,
1153 &plus_ext_features, &plus_ext2_features,
1154 &plus_ext3_features, &plus_kvm_features,
1155 &plus_svm_features, &plus_7_0_ebx_features);
1156 } else if (featurestr[0] == '-') {
1157 add_flagname_to_bitmaps(featurestr + 1, &minus_features,
1158 &minus_ext_features, &minus_ext2_features,
1159 &minus_ext3_features, &minus_kvm_features,
1160 &minus_svm_features, &minus_7_0_ebx_features);
1161 } else if ((val = strchr(featurestr, '='))) {
1163 if (!strcmp(featurestr, "family")) {
1165 numvalue = strtoul(val, &err, 0);
1166 if (!*val || *err || numvalue > 0xff + 0xf) {
1167 fprintf(stderr, "bad numerical value %s\n", val);
1170 x86_cpu_def->family = numvalue;
1171 } else if (!strcmp(featurestr, "model")) {
1173 numvalue = strtoul(val, &err, 0);
1174 if (!*val || *err || numvalue > 0xff) {
1175 fprintf(stderr, "bad numerical value %s\n", val);
1178 x86_cpu_def->model = numvalue;
1179 } else if (!strcmp(featurestr, "stepping")) {
1181 numvalue = strtoul(val, &err, 0);
1182 if (!*val || *err || numvalue > 0xf) {
1183 fprintf(stderr, "bad numerical value %s\n", val);
1186 x86_cpu_def->stepping = numvalue;
1187 } else if (!strcmp(featurestr, "level")) {
1189 numvalue = strtoul(val, &err, 0);
1190 if (!*val || *err) {
1191 fprintf(stderr, "bad numerical value %s\n", val);
1194 x86_cpu_def->level = numvalue;
1195 } else if (!strcmp(featurestr, "xlevel")) {
1197 numvalue = strtoul(val, &err, 0);
1198 if (!*val || *err) {
1199 fprintf(stderr, "bad numerical value %s\n", val);
1202 if (numvalue < 0x80000000) {
1203 numvalue += 0x80000000;
1205 x86_cpu_def->xlevel = numvalue;
1206 } else if (!strcmp(featurestr, "vendor")) {
1207 if (strlen(val) != 12) {
1208 fprintf(stderr, "vendor string must be 12 chars long\n");
1211 x86_cpu_def->vendor1 = 0;
1212 x86_cpu_def->vendor2 = 0;
1213 x86_cpu_def->vendor3 = 0;
1214 for (i = 0; i < 4; i++) {
1215 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
1216 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
1217 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
1219 x86_cpu_def->vendor_override = 1;
1220 } else if (!strcmp(featurestr, "model_id")) {
1221 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
1223 } else if (!strcmp(featurestr, "tsc_freq")) {
1227 tsc_freq = strtosz_suffix_unit(val, &err,
1228 STRTOSZ_DEFSUFFIX_B, 1000);
1229 if (tsc_freq < 0 || *err) {
1230 fprintf(stderr, "bad numerical value %s\n", val);
1233 x86_cpu_def->tsc_khz = tsc_freq / 1000;
1234 } else if (!strcmp(featurestr, "hv_spinlocks")) {
1236 numvalue = strtoul(val, &err, 0);
1237 if (!*val || *err) {
1238 fprintf(stderr, "bad numerical value %s\n", val);
1241 hyperv_set_spinlock_retries(numvalue);
1243 fprintf(stderr, "unrecognized feature %s\n", featurestr);
1246 } else if (!strcmp(featurestr, "check")) {
1248 } else if (!strcmp(featurestr, "enforce")) {
1249 check_cpuid = enforce_cpuid = 1;
1250 } else if (!strcmp(featurestr, "hv_relaxed")) {
1251 hyperv_enable_relaxed_timing(true);
1252 } else if (!strcmp(featurestr, "hv_vapic")) {
1253 hyperv_enable_vapic_recommended(true);
1255 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
1258 featurestr = strtok(NULL, ",");
1260 x86_cpu_def->features |= plus_features;
1261 x86_cpu_def->ext_features |= plus_ext_features;
1262 x86_cpu_def->ext2_features |= plus_ext2_features;
1263 x86_cpu_def->ext3_features |= plus_ext3_features;
1264 x86_cpu_def->kvm_features |= plus_kvm_features;
1265 x86_cpu_def->svm_features |= plus_svm_features;
1266 x86_cpu_def->cpuid_7_0_ebx_features |= plus_7_0_ebx_features;
1267 x86_cpu_def->features &= ~minus_features;
1268 x86_cpu_def->ext_features &= ~minus_ext_features;
1269 x86_cpu_def->ext2_features &= ~minus_ext2_features;
1270 x86_cpu_def->ext3_features &= ~minus_ext3_features;
1271 x86_cpu_def->kvm_features &= ~minus_kvm_features;
1272 x86_cpu_def->svm_features &= ~minus_svm_features;
1273 x86_cpu_def->cpuid_7_0_ebx_features &= ~minus_7_0_ebx_features;
1275 if (check_features_against_host(x86_cpu_def) && enforce_cpuid)
1278 if (x86_cpu_def->cpuid_7_0_ebx_features && x86_cpu_def->level < 7) {
1279 x86_cpu_def->level = 7;
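/* Usage sketch (illustrative command-line strings, not from this file):
 * cpu_model may look like "core2duo,+aes,-nx,family=6,model=23" or, with KVM,
 * just "host".  Unknown '+'/'-' flag names only warn via
 * add_flagname_to_bitmaps(); a malformed key=value pair makes the lookup
 * fail. */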
1289 /* generate a composite string into buf of all cpuid names in featureset
1290  * selected by fbits. Indicate truncation at bufsize in the event of overflow.
1291  * If flags is non-zero, suppress names undefined in featureset.
1293 static void listflags(char *buf, int bufsize, uint32_t fbits,
1294 const char **featureset, uint32_t flags)
1296 const char **p = &featureset[31];
1300 b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL;
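/* 'b' reserves the last four bytes of buf for a "..." truncation marker (plus
 * its NUL); bufsize is shrunk by 3 so the snprintf() calls below leave room
 * for the marker when the flag list overflows the buffer. */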
1302 for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1 << bit), --bit)
1303 if (fbits & 1 << bit && (*p || !flags)) {
1305 nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p);
1307 nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit);
1308 if (bufsize <= nc) {
1310 memcpy(b, "...", sizeof("..."));
1319 /* generate CPU information. */
1320 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1325 for (def = x86_defs; def; def = def->next) {
1326 snprintf(buf, sizeof(buf), "%s", def->name);
1327 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1329 if (kvm_enabled()) {
1330 (*cpu_fprintf)(f, "x86 %16s\n", "[host]");
1332 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1333 listflags(buf, sizeof(buf), (uint32_t)~0, feature_name, 1);
1334 (*cpu_fprintf)(f, " %s\n", buf);
1335 listflags(buf, sizeof(buf), (uint32_t)~0, ext_feature_name, 1);
1336 (*cpu_fprintf)(f, " %s\n", buf);
1337 listflags(buf, sizeof(buf), (uint32_t)~0, ext2_feature_name, 1);
1338 (*cpu_fprintf)(f, " %s\n", buf);
1339 listflags(buf, sizeof(buf), (uint32_t)~0, ext3_feature_name, 1);
1340 (*cpu_fprintf)(f, " %s\n", buf);
1343 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1345 CpuDefinitionInfoList *cpu_list = NULL;
1348 for (def = x86_defs; def; def = def->next) {
1349 CpuDefinitionInfoList *entry;
1350 CpuDefinitionInfo *info;
1352 info = g_malloc0(sizeof(*info));
1353 info->name = g_strdup(def->name);
1355 entry = g_malloc0(sizeof(*entry));
1356 entry->value = info;
1357 entry->next = cpu_list;
1364 int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
1366 CPUX86State *env = &cpu->env;
1367 x86_def_t def1, *def = &def1;
1368 Error *error = NULL;
1370 memset(def, 0, sizeof(*def));
1372 if (cpu_x86_find_by_name(def, cpu_model) < 0)
1375 env->cpuid_vendor1 = def->vendor1;
1376 env->cpuid_vendor2 = def->vendor2;
1377 env->cpuid_vendor3 = def->vendor3;
1379 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
1380 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
1381 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
1383 env->cpuid_vendor_override = def->vendor_override;
1384 object_property_set_int(OBJECT(cpu), def->level, "level", &error);
1385 object_property_set_int(OBJECT(cpu), def->family, "family", &error);
1386 object_property_set_int(OBJECT(cpu), def->model, "model", &error);
1387 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", &error);
1388 env->cpuid_features = def->features;
1389 env->cpuid_ext_features = def->ext_features;
1390 env->cpuid_ext2_features = def->ext2_features;
1391 env->cpuid_ext3_features = def->ext3_features;
1392 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", &error);
1393 env->cpuid_kvm_features = def->kvm_features;
1394 env->cpuid_svm_features = def->svm_features;
1395 env->cpuid_ext4_features = def->ext4_features;
1396 env->cpuid_7_0_ebx_features = def->cpuid_7_0_ebx_features;
1397 env->cpuid_xlevel2 = def->xlevel2;
1398 object_property_set_int(OBJECT(cpu), (int64_t)def->tsc_khz * 1000,
1399 "tsc-frequency", &error);
1401 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
1404 if (env->cpuid_vendor1 == CPUID_VENDOR_AMD_1 &&
1405 env->cpuid_vendor2 == CPUID_VENDOR_AMD_2 &&
1406 env->cpuid_vendor3 == CPUID_VENDOR_AMD_3) {
1407 env->cpuid_ext2_features &= ~CPUID_EXT2_AMD_ALIASES;
1408 env->cpuid_ext2_features |= (def->features & CPUID_EXT2_AMD_ALIASES);
1411 if (!kvm_enabled()) {
1412 env->cpuid_features &= TCG_FEATURES;
1413 env->cpuid_ext_features &= TCG_EXT_FEATURES;
1414 env->cpuid_ext2_features &= (TCG_EXT2_FEATURES
1415 #ifdef TARGET_X86_64
1416 | CPUID_EXT2_SYSCALL | CPUID_EXT2_LM
1419 env->cpuid_ext3_features &= TCG_EXT3_FEATURES;
1420 env->cpuid_svm_features &= TCG_SVM_FEATURES;
1422 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", &error);
1423 if (error_is_set(&error)) {
1430 #if !defined(CONFIG_USER_ONLY)
1432 void cpu_clear_apic_feature(CPUX86State *env)
1434 env->cpuid_features &= ~CPUID_APIC;
1437 #endif /* !CONFIG_USER_ONLY */
1439 /* Initialize list of CPU models, filling some non-static fields if necessary
1441 void x86_cpudef_setup(void)
1444 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
1446 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
1447 x86_def_t *def = &builtin_x86_defs[i];
1448 def->next = x86_defs;
1450 /* Look for specific "cpudef" models that
1451  * have the QEMU version in .model_id */
1452 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
1453 if (strcmp(model_with_versions[j], def->name) == 0) {
1454 pstrcpy(def->model_id, sizeof(def->model_id),
1455 "QEMU Virtual CPU version ");
1456 pstrcat(def->model_id, sizeof(def->model_id),
1457 qemu_get_version());
1466 static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
1467 uint32_t *ecx, uint32_t *edx)
1469 *ebx = env->cpuid_vendor1;
1470 *edx = env->cpuid_vendor2;
1471 *ecx = env->cpuid_vendor3;
1473 /* sysenter isn't supported in compatibility mode on AMD, and syscall
1474  * isn't supported in compatibility mode on Intel.
1475  * Normally we advertise the actual CPU vendor, but you can override
1476  * this if you want to use KVM's sysenter/syscall emulation
1477  * in compatibility mode and when doing cross-vendor migration
1479 if (kvm_enabled() && ! env->cpuid_vendor_override) {
1480 host_cpuid(0, 0, NULL, ebx, ecx, edx);
1484 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
1485 uint32_t *eax, uint32_t *ebx,
1486 uint32_t *ecx, uint32_t *edx)
1488 /* test if maximum index reached */
1489 if (index & 0x80000000) {
1490 if (index > env->cpuid_xlevel) {
1491 if (env->cpuid_xlevel2 > 0) {
1492 /* Handle the Centaur's CPUID instruction. */
1493 if (index > env->cpuid_xlevel2) {
1494 index = env->cpuid_xlevel2;
1495 } else if (index < 0xC0000000) {
1496 index = env->cpuid_xlevel;
1499 index = env->cpuid_xlevel;
1503 if (index > env->cpuid_level)
1504 index = env->cpuid_level;
1509 *eax = env->cpuid_level;
1510 get_cpuid_vendor(env, ebx, ecx, edx);
1513 *eax = env->cpuid_version;
1514 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1515 *ecx = env->cpuid_ext_features;
1516 *edx = env->cpuid_features;
1517 if (env->nr_cores * env->nr_threads > 1) {
1518 *ebx |= (env->nr_cores * env->nr_threads) << 16;
1519 *edx |= 1 << 28; /* HTT bit */
1523 /* cache info: needed for Pentium Pro compatibility */
1530 /* cache info: needed for Core compatibility */
1531 if (env->nr_cores > 1) {
1532 *eax = (env->nr_cores - 1) << 26;
1537 case 0: /* L1 dcache info */
1543 case 1: /* L1 icache info */
1549 case 2: /* L2 cache info */
1551 if (env->nr_threads > 1) {
1552 *eax |= (env->nr_threads - 1) << 14;
1558 default: /* end of info */
1567 /* mwait info: needed for Core compatibility */
1568 *eax = 0; /* Smallest monitor-line size in bytes */
1569 *ebx = 0; /* Largest monitor-line size in bytes */
1570 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
1574 /* Thermal and Power Leaf */
1581 /* Structured Extended Feature Flags Enumeration Leaf */
1583 *eax = 0; /* Maximum ECX value for sub-leaves */
1584 *ebx = env->cpuid_7_0_ebx_features; /* Feature flags */
1585 *ecx = 0; /* Reserved */
1586 *edx = 0; /* Reserved */
1595 /* Direct Cache Access Information Leaf */
1596 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
1602 /* Architectural Performance Monitoring Leaf */
1603 if (kvm_enabled()) {
1604 KVMState *s = env->kvm_state;
1606 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
1607 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
1608 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
1609 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
1618 /* Processor Extended State */
1619 if (!(env->cpuid_ext_features & CPUID_EXT_XSAVE)) {
1626 if (kvm_enabled()) {
1627 KVMState *s = env->kvm_state;
1629 *eax = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EAX);
1630 *ebx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EBX);
1631 *ecx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_ECX);
1632 *edx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EDX);
1641 *eax = env->cpuid_xlevel;
1642 *ebx = env->cpuid_vendor1;
1643 *edx = env->cpuid_vendor2;
1644 *ecx = env->cpuid_vendor3;
1647 *eax = env->cpuid_version;
1649 *ecx = env->cpuid_ext3_features;
1650 *edx = env->cpuid_ext2_features;
1652 /* The Linux kernel checks for the CMPLegacy bit and
1653 * discards multiple thread information if it is set.
1654 * So don't set it here for Intel, to keep Linux guests happy.
1656 if (env->nr_cores * env->nr_threads > 1) {
1657 uint32_t tebx, tecx, tedx;
1658 get_cpuid_vendor(env, &tebx, &tecx, &tedx);
1659 if (tebx != CPUID_VENDOR_INTEL_1 ||
1660 tedx != CPUID_VENDOR_INTEL_2 ||
1661 tecx != CPUID_VENDOR_INTEL_3) {
1662 *ecx |= 1 << 1; /* CmpLegacy bit */
1669 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1670 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1671 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1672 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1675 /* cache info (L1 cache) */
1682 /* cache info (L2 cache) */
1689 /* virtual & phys address size in low 2 bytes. */
1690 /* XXX: This value must match the one used in the MMU code. */
1691 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
1692 /* 64 bit processor */
1693 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1694 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
1696 if (env->cpuid_features & CPUID_PSE36)
1697 *eax = 0x00000024; /* 36 bits physical */
1699 *eax = 0x00000020; /* 32 bits physical */
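/* CPUID[8000_0008].EAX encoding: physical address width in bits 7..0 and
 * linear (virtual) address width in bits 15..8; hence 0x3028 above means
 * 48-bit virtual / 40-bit physical, while the 32-bit cases report only the
 * 36-bit or 32-bit physical width. */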
1704 if (env->nr_cores * env->nr_threads > 1) {
1705 *ecx |= (env->nr_cores * env->nr_threads) - 1;
1709 if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
1710 *eax = 0x00000001; /* SVM Revision */
1711 *ebx = 0x00000010; /* nr of ASIDs */
1713 *edx = env->cpuid_svm_features; /* optional features */
1722 *eax = env->cpuid_xlevel2;
1728 /* Support for VIA CPU's CPUID instruction */
1729 *eax = env->cpuid_version;
1732 *edx = env->cpuid_ext4_features;
1737 /* Reserved for future use; currently filled with zero */
1744 /* reserved values: zero */
1753 /* CPUClass::reset() */
1754 static void x86_cpu_reset(CPUState *s)
1756 X86CPU *cpu = X86_CPU(s);
1757 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
1758 CPUX86State *env = &cpu->env;
1761 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
1762 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
1763 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
1766 xcc->parent_reset(s);
1769 memset(env, 0, offsetof(CPUX86State, breakpoints));
1773 env->old_exception = -1;
1775 /* init to reset state */
1777 #ifdef CONFIG_SOFTMMU
1778 env->hflags |= HF_SOFTMMU_MASK;
1780 env->hflags2 |= HF2_GIF_MASK;
1782 cpu_x86_update_cr0(env, 0x60000010);
1783 env->a20_mask = ~0x0;
1784 env->smbase = 0x30000;
1786 env->idt.limit = 0xffff;
1787 env->gdt.limit = 0xffff;
1788 env->ldt.limit = 0xffff;
1789 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
1790 env->tr.limit = 0xffff;
1791 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
1793 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
1794 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
1795 DESC_R_MASK | DESC_A_MASK);
1796 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
1797 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1799 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
1800 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1802 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
1803 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1805 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
1806 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1808 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
1809 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1813 env->regs[R_EDX] = env->cpuid_version;
1818 for (i = 0; i < 8; i++) {
1823 env->mxcsr = 0x1f80;
1825 env->pat = 0x0007040600070406ULL;
1826 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
1828 memset(env->dr, 0, sizeof(env->dr));
1829 env->dr[6] = DR6_FIXED_1;
1830 env->dr[7] = DR7_FIXED_1;
1831 cpu_breakpoint_remove_all(env, BP_CPU);
1832 cpu_watchpoint_remove_all(env, BP_CPU);
1834 #if !defined(CONFIG_USER_ONLY)
1835 /* We hard-wire the BSP to the first CPU. */
1836 if (env->cpu_index == 0) {
1837 apic_designate_bsp(env->apic_state);
1840 env->halted = !cpu_is_bsp(cpu);
1844 #ifndef CONFIG_USER_ONLY
1845 bool cpu_is_bsp(X86CPU *cpu)
1847 return cpu_get_apic_base(cpu->env.apic_state) & MSR_IA32_APICBASE_BSP;
1850 /* TODO: remove me when reset over the QOM tree is implemented */
1851 static void x86_cpu_machine_reset_cb(void *opaque)
1853 X86CPU *cpu = opaque;
1854 cpu_reset(CPU(cpu));
1858 static void mce_init(X86CPU *cpu)
1860 CPUX86State *cenv = &cpu->env;
1863 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
1864 && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
1865 (CPUID_MCE | CPUID_MCA)) {
1866 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
1867 cenv->mcg_ctl = ~(uint64_t)0;
1868 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
1869 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
1874 void x86_cpu_realize(Object *obj, Error **errp)
1876 X86CPU *cpu = X86_CPU(obj);
1878 #ifndef CONFIG_USER_ONLY
1879 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
1883 qemu_init_vcpu(&cpu->env);
1884 cpu_reset(CPU(cpu));
1887 static void x86_cpu_initfn(Object *obj)
1889 X86CPU *cpu = X86_CPU(obj);
1890 CPUX86State *env = &cpu->env;
1895 object_property_add(obj, "family", "int",
1896 x86_cpuid_version_get_family,
1897 x86_cpuid_version_set_family, NULL, NULL, NULL);
1898 object_property_add(obj, "model", "int",
1899 x86_cpuid_version_get_model,
1900 x86_cpuid_version_set_model, NULL, NULL, NULL);
1901 object_property_add(obj, "stepping", "int",
1902 x86_cpuid_version_get_stepping,
1903 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
1904 object_property_add(obj, "level", "int",
1905 x86_cpuid_get_level,
1906 x86_cpuid_set_level, NULL, NULL, NULL);
1907 object_property_add(obj, "xlevel", "int",
1908 x86_cpuid_get_xlevel,
1909 x86_cpuid_set_xlevel, NULL, NULL, NULL);
1910 object_property_add_str(obj, "vendor",
1911 x86_cpuid_get_vendor,
1912 x86_cpuid_set_vendor, NULL);
1913 object_property_add_str(obj, "model-id",
1914 x86_cpuid_get_model_id,
1915 x86_cpuid_set_model_id, NULL);
1916 object_property_add(obj, "tsc-frequency", "int",
1917 x86_cpuid_get_tsc_freq,
1918 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
1920 env->cpuid_apic_id = env->cpu_index;
1922 /* init various static tables used in TCG mode */
1923 if (tcg_enabled() && !inited) {
1925 optimize_flags_init();
1926 #ifndef CONFIG_USER_ONLY
1927 cpu_set_debug_excp_handler(breakpoint_handler);
1932 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
1934 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1935 CPUClass *cc = CPU_CLASS(oc);
1937 xcc->parent_reset = cc->reset;
1938 cc->reset = x86_cpu_reset;
1941 static const TypeInfo x86_cpu_type_info = {
1942 .name = TYPE_X86_CPU,
1944 .instance_size = sizeof(X86CPU),
1945 .instance_init = x86_cpu_initfn,
1947 .class_size = sizeof(X86CPUClass),
1948 .class_init = x86_cpu_common_class_init,
1951 static void x86_cpu_register_types(void)
1953 type_register_static(&x86_cpu_type_info);
1956 type_init(x86_cpu_register_types)