2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
27 #include "qemu-option.h"
28 #include "qemu-config.h"
30 #include "qapi/qapi-visit-core.h"
34 /* feature flags taken from "Intel Processor Identification and the CPUID
35 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
36 * between feature naming conventions, aliases may be added.
/* Names for CPUID[1].EDX feature bits, indexed by bit position (0..31).
 * NULL marks a reserved/unnamed bit.  NOTE(review): the array terminator
 * is outside the visible chunk. */
38 static const char *feature_name[] = {
39 "fpu", "vme", "de", "pse",
40 "tsc", "msr", "pae", "mce",
41 "cx8", "apic", NULL, "sep",
42 "mtrr", "pge", "mca", "cmov",
43 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
44 NULL, "ds" /* Intel dts */, "acpi", "mmx",
45 "fxsr", "sse", "sse2", "ss",
46 "ht" /* Intel htt */, "tm", "ia64", "pbe",
/* Names for CPUID[1].ECX feature bits.  "a|b" entries accept either
 * spelling on the -cpu command line (see altcmp() below). */
48 static const char *ext_feature_name[] = {
49 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
50 "ds_cpl", "vmx", "smx", "est",
51 "tm2", "ssse3", "cid", NULL,
52 "fma", "cx16", "xtpr", "pdcm",
53 NULL, NULL, "dca", "sse4.1|sse4_1",
54 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
55 "tsc-deadline", "aes", "xsave", "osxsave",
56 "avx", NULL, NULL, "hypervisor",
/* Names for CPUID[0x80000001].EDX feature bits.  The low bits mirror
 * CPUID[1].EDX on AMD (see EXT2_FEATURE_MASK below). */
58 static const char *ext2_feature_name[] = {
59 "fpu", "vme", "de", "pse",
60 "tsc", "msr", "pae", "mce",
61 "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall",
62 "mtrr", "pge", "mca", "cmov",
63 "pat", "pse36", NULL, NULL /* Linux mp */,
64 "nx|xd", NULL, "mmxext", "mmx",
65 "fxsr", "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
66 NULL, "lm|i64", "3dnowext", "3dnow",
/* Names for CPUID[0x80000001].ECX feature bits. */
68 static const char *ext3_feature_name[] = {
69 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
70 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
71 "3dnowprefetch", "osvw", "ibs", "xop",
72 "skinit", "wdt", NULL, NULL,
73 "fma4", NULL, "cvt16", "nodeid_msr",
74 NULL, NULL, NULL, NULL,
75 NULL, NULL, NULL, NULL,
76 NULL, NULL, NULL, NULL,
/* Names for the KVM paravirtual-feature CPUID leaf bits.  "kvmclock"
 * intentionally appears twice: both clocksource bits carry the same
 * user-visible name. */
79 static const char *kvm_feature_name[] = {
80 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock", "kvm_asyncpf", NULL, NULL, NULL,
81 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
82 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
83 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* Names for the SVM (AMD virtualization) feature bits,
 * CPUID[0x8000000A].EDX. */
86 static const char *svm_feature_name[] = {
87 "npt", "lbrv", "svm_lock", "nrip_save",
88 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
89 NULL, NULL, "pause_filter", NULL,
90 "pfthreshold", NULL, NULL, NULL,
91 NULL, NULL, NULL, NULL,
92 NULL, NULL, NULL, NULL,
93 NULL, NULL, NULL, NULL,
94 NULL, NULL, NULL, NULL,
97 /* collects per-function cpuid data
/* One (guest feature word, name table) pairing used by
 * check_features_against_host().  NOTE(review): several members
 * (host_feat, check_feat, cpuid) are elided from this chunk but are
 * referenced by unavailable_host_feature() below. */
99 typedef struct model_features_t {
100 uint32_t *guest_feat;
103 const char **flag_names;
/* When non-zero (set by "-cpu ...,enforce"), refusing to start rather
 * than silently dropping guest flags the host cannot provide. */
108 int enforce_cpuid = 0;
/* Execute the CPUID instruction on the *host* for the given leaf
 * (function) and sub-leaf (count), storing the raw register results.
 * Only compiled in when KVM is available; the second asm variant
 * preserves EBX via pusha for 32-bit PIC builds.
 * NOTE(review): several asm lines and the #else/#endif structure are
 * elided from this chunk. */
110 void host_cpuid(uint32_t function, uint32_t count,
111 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
113 #if defined(CONFIG_KVM)
118 : "=a"(vec[0]), "=b"(vec[1]),
119 "=c"(vec[2]), "=d"(vec[3])
120 : "0"(function), "c"(count) : "cc");
122 asm volatile("pusha \n\t"
124 "mov %%eax, 0(%2) \n\t"
125 "mov %%ebx, 4(%2) \n\t"
126 "mov %%ecx, 8(%2) \n\t"
127 "mov %%edx, 12(%2) \n\t"
129 : : "a"(function), "c"(count), "S"(vec)
/* True for any non-NUL character outside printable ASCII [' '..'~'],
 * i.e. whitespace/control bytes used as token delimiters.  Despite the
 * name this matches more than just whitespace. */
144 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
146 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
147 * a substring. ex if !NULL points to the first char after a substring,
148 * otherwise the string is assumed to sized by a terminating nul.
149 * Return lexical ordering of *s1:*s2.
/* NOTE(review): the loop structure and return statements of this
 * function are elided from this chunk; only the two guard conditions
 * (mismatch / both substrings exhausted) are visible. */
151 static int sstrcmp(const char *s1, const char *e1, const char *s2,
155 if (!*s1 || !*s2 || *s1 != *s2)
158 if (s1 == e1 && s2 == e2)
167 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
168 * '|' delimited (possibly empty) strings in which case search for a match
169 * within the alternatives proceeds left to right. Return 0 for success,
170 * non-zero otherwise.
/* NOTE(review): the advance-past-'|' and loop-exit logic is elided
 * from this chunk. */
172 static int altcmp(const char *s, const char *e, const char *altstr)
176 for (q = p = altstr; ; ) {
/* advance p to the end of the current '|'-delimited alternative */
177 while (*p && *p != '|')
/* empty alternative matches empty input; otherwise substring-compare */
179 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
188 /* search featureset for flag *[s..e), if found set corresponding bit in
189 * *pval and return true, otherwise return false
191 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
192 const char **featureset)
/* walk all 32 bit positions; mask wraps to 0 after bit 31, ending the loop */
198 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
/* skip NULL (reserved) slots; altcmp() handles "a|b" alias entries */
199 if (*ppc && !altcmp(s, e, *ppc)) {
/* Set the bit named by flagname in whichever of the six feature words
 * owns it, searching the name tables in order.  Prints a warning to
 * stderr (but does not fail) when no table recognizes the name. */
207 static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
208 uint32_t *ext_features,
209 uint32_t *ext2_features,
210 uint32_t *ext3_features,
211 uint32_t *kvm_features,
212 uint32_t *svm_features)
214 if (!lookup_feature(features, flagname, NULL, feature_name) &&
215 !lookup_feature(ext_features, flagname, NULL, ext_feature_name) &&
216 !lookup_feature(ext2_features, flagname, NULL, ext2_feature_name) &&
217 !lookup_feature(ext3_features, flagname, NULL, ext3_feature_name) &&
218 !lookup_feature(kvm_features, flagname, NULL, kvm_feature_name) &&
219 !lookup_feature(svm_features, flagname, NULL, svm_feature_name))
220 fprintf(stderr, "CPU feature %s not found\n", flagname);
/* One CPU model definition: vendor/family/model/stepping plus all the
 * CPUID feature words.  Chained via 'next' into the x86_defs list.
 * NOTE(review): several members (name, level, xlevel, model_id,
 * tsc_khz, vendor_override, xlevel2, flags) are elided from this
 * chunk but are referenced by the functions below. */
223 typedef struct x86_def_t {
224 struct x86_def_t *next;
227 uint32_t vendor1, vendor2, vendor3;
232 uint32_t features, ext_features, ext2_features, ext3_features;
233 uint32_t kvm_features, svm_features;
238 /* Store the results of Centaur's CPUID instructions */
239 uint32_t ext4_features;
241 /* The feature bits on CPUID[EAX=7,ECX=0].EBX */
242 uint32_t cpuid_7_0_ebx_features;
/* Cumulative feature sets for the historical CPU generations; each
 * later model builds on the previous one. */
245 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
246 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
247 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
248 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
249 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
250 CPUID_PSE36 | CPUID_FXSR)
251 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
252 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
253 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
254 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
255 CPUID_PAE | CPUID_SEP | CPUID_APIC)
/* Bits of CPUID[1].EDX that AMD duplicates in CPUID[0x80000001].EDX */
256 #define EXT2_FEATURE_MASK 0x0183F3FF
/* Feature bits the TCG (software) emulator can actually provide;
 * used to mask guest features when KVM is not in use. */
258 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
259 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
260 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
261 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
262 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
263 /* partly implemented:
264 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64)
265 CPUID_PSE36 (needed for Solaris) */
267 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
268 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | \
269 CPUID_EXT_CX16 | CPUID_EXT_POPCNT | \
270 CPUID_EXT_HYPERVISOR)
272 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
273 CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_XSAVE */
274 #define TCG_EXT2_FEATURES ((TCG_FEATURES & EXT2_FEATURE_MASK) | \
275 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
276 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT)
278 CPUID_EXT2_PDPE1GB */
279 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
280 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
/* TCG emulates no SVM sub-features */
281 #define TCG_SVM_FEATURES 0
283 /* maintains list of cpu model definitions
/* Head of the singly-linked list of registered CPU models; entries are
 * chained through x86_def_t.next.  '{NULL}' is just a verbose way of
 * initializing the pointer to NULL. */
285 static x86_def_t *x86_defs = {NULL};
287 /* built-in cpu model definitions (deprecated)
/* NOTE(review): the .name / .level / .family / .model / .stepping
 * lines of most entries are elided from this chunk; the model
 * identifications below are inferred from the visible .model_id
 * strings and should be confirmed against the full file. */
289 static x86_def_t builtin_x86_defs[] = {
/* presumably "qemu64": generic 64-bit QEMU CPU with AMD vendor id */
293 .vendor1 = CPUID_VENDOR_AMD_1,
294 .vendor2 = CPUID_VENDOR_AMD_2,
295 .vendor3 = CPUID_VENDOR_AMD_3,
299 .features = PPRO_FEATURES |
300 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
302 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
303 .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
304 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
305 .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
306 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
307 .xlevel = 0x8000000A,
308 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
/* AMD Phenom model (see .model_id below) */
313 .vendor1 = CPUID_VENDOR_AMD_1,
314 .vendor2 = CPUID_VENDOR_AMD_2,
315 .vendor3 = CPUID_VENDOR_AMD_3,
319 .features = PPRO_FEATURES |
320 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
321 CPUID_PSE36 | CPUID_VME | CPUID_HT,
322 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
324 .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
325 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
326 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
327 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
328 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
330 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
331 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
332 .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
333 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
334 .svm_features = CPUID_SVM_NPT | CPUID_SVM_LBRV,
335 .xlevel = 0x8000001A,
336 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
/* Intel Core 2 Duo model (see .model_id below) */
344 .features = PPRO_FEATURES |
345 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
346 CPUID_PSE36 | CPUID_VME | CPUID_DTS | CPUID_ACPI | CPUID_SS |
347 CPUID_HT | CPUID_TM | CPUID_PBE,
348 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
349 CPUID_EXT_DTES64 | CPUID_EXT_DSCPL | CPUID_EXT_VMX | CPUID_EXT_EST |
350 CPUID_EXT_TM2 | CPUID_EXT_CX16 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
351 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
352 .ext3_features = CPUID_EXT3_LAHF_LM,
353 .xlevel = 0x80000008,
354 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
/* presumably "kvm64": lowest-common-denominator 64-bit KVM model */
359 .vendor1 = CPUID_VENDOR_INTEL_1,
360 .vendor2 = CPUID_VENDOR_INTEL_2,
361 .vendor3 = CPUID_VENDOR_INTEL_3,
365 /* Missing: CPUID_VME, CPUID_HT */
366 .features = PPRO_FEATURES |
367 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
369 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
370 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
371 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
372 .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
373 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
374 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
375 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
376 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
377 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
379 .xlevel = 0x80000008,
380 .model_id = "Common KVM processor"
/* presumably "qemu32": generic 32-bit QEMU CPU */
388 .features = PPRO_FEATURES,
389 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
390 .xlevel = 0x80000004,
391 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
/* presumably "kvm32": lowest-common-denominator 32-bit KVM model */
399 .features = PPRO_FEATURES |
400 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
401 .ext_features = CPUID_EXT_SSE3,
402 .ext2_features = PPRO_FEATURES & EXT2_FEATURE_MASK,
404 .xlevel = 0x80000008,
405 .model_id = "Common 32-bit KVM processor"
/* Intel Core Duo model (see .model_id below) */
413 .features = PPRO_FEATURES | CPUID_VME |
414 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_DTS | CPUID_ACPI |
415 CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
416 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_VMX |
417 CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
418 .ext2_features = CPUID_EXT2_NX,
419 .xlevel = 0x80000008,
420 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
/* historical models built from the cumulative generation masks */
428 .features = I486_FEATURES,
437 .features = PENTIUM_FEATURES,
446 .features = PENTIUM2_FEATURES,
455 .features = PENTIUM3_FEATURES,
/* presumably "athlon" (AMD vendor id, 3DNow! extensions) */
461 .vendor1 = CPUID_VENDOR_AMD_1,
462 .vendor2 = CPUID_VENDOR_AMD_2,
463 .vendor3 = CPUID_VENDOR_AMD_3,
467 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
468 .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
469 .xlevel = 0x80000008,
470 /* XXX: put another string ? */
471 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
/* Intel Atom N270 model (see .model_id below) */
475 /* original is on level 10 */
480 .features = PPRO_FEATURES |
481 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | CPUID_DTS |
482 CPUID_ACPI | CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
483 /* Some CPUs got no CPUID_SEP */
484 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
485 CPUID_EXT_DSCPL | CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR,
486 .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) | CPUID_EXT2_NX,
487 .ext3_features = CPUID_EXT3_LAHF_LM,
488 .xlevel = 0x8000000A,
489 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
/* Read the host's 48-byte brand string from CPUID leaves
 * 0x80000002..0x80000004 into str (caller supplies >= 48 bytes;
 * the leaves include the NUL terminator). */
493 static int cpu_x86_fill_model_id(char *str)
495 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
498 for (i = 0; i < 3; i++) {
499 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
/* each leaf yields 16 bytes, 4 per register, in EAX/EBX/ECX/EDX order */
500 memcpy(str + i * 16 + 0, &eax, 4);
501 memcpy(str + i * 16 + 4, &ebx, 4);
502 memcpy(str + i * 16 + 8, &ecx, 4);
503 memcpy(str + i * 16 + 12, &edx, 4);
/* Populate *x86_cpu_def with the host CPU's identity and feature
 * bits, read directly via host_cpuid().  Used by "-cpu host". */
508 static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
510 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
512 x86_cpu_def->name = "host";
513 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
514 x86_cpu_def->level = eax;
/* leaf 0 vendor string registers appear in EBX, EDX, ECX order */
515 x86_cpu_def->vendor1 = ebx;
516 x86_cpu_def->vendor2 = edx;
517 x86_cpu_def->vendor3 = ecx;
519 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
/* combine base + extended family/model fields from leaf 1 EAX */
520 x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
521 x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
522 x86_cpu_def->stepping = eax & 0x0F;
523 x86_cpu_def->ext_features = ecx;
524 x86_cpu_def->features = edx;
/* leaf 7 sub-leaf 0 EBX features come from KVM, not raw CPUID, so
 * only KVM-supported bits are exposed */
526 if (kvm_enabled() && x86_cpu_def->level >= 7) {
527 x86_cpu_def->cpuid_7_0_ebx_features = kvm_arch_get_supported_cpuid(kvm_state, 0x7, 0, R_EBX);
529 x86_cpu_def->cpuid_7_0_ebx_features = 0;
532 host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
533 x86_cpu_def->xlevel = eax;
535 host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
536 x86_cpu_def->ext2_features = edx;
537 x86_cpu_def->ext3_features = ecx;
538 cpu_x86_fill_model_id(x86_cpu_def->model_id);
539 x86_cpu_def->vendor_override = 0;
541 /* Call Centaur's CPUID instruction. */
542 if (x86_cpu_def->vendor1 == CPUID_VENDOR_VIA_1 &&
543 x86_cpu_def->vendor2 == CPUID_VENDOR_VIA_2 &&
544 x86_cpu_def->vendor3 == CPUID_VENDOR_VIA_3) {
545 host_cpuid(0xC0000000, 0, &eax, &ebx, &ecx, &edx);
546 if (eax >= 0xC0000001) {
547 /* Support VIA max extended level */
548 x86_cpu_def->xlevel2 = eax;
549 host_cpuid(0xC0000001, 0, &eax, &ebx, &ecx, &edx);
550 x86_cpu_def->ext4_features = edx;
555 * Every SVM feature requires emulation support in KVM - so we can't just
556 * read the host features here. KVM might even support SVM features not
557 * available on the host hardware. Just set all bits and mask out the
558 * unsupported ones later.
560 x86_cpu_def->svm_features = -1;
/* Print a warning naming the single feature bit in 'mask' that the
 * guest requested but the host lacks.  f->cpuid is the CPUID leaf the
 * bit belongs to; unnamed bits are reported as "[reserved]".
 * NOTE(review): the loop's bit-matching condition and return are
 * elided from this chunk. */
565 static int unavailable_host_feature(struct model_features_t *f, uint32_t mask)
569 for (i = 0; i < 32; ++i)
571 fprintf(stderr, "warning: host cpuid %04x_%04x lacks requested"
572 " flag '%s' [0x%08x]\n",
573 f->cpuid >> 16, f->cpuid & 0xffff,
574 f->flag_names[i] ? f->flag_names[i] : "[reserved]", mask);
580 /* best effort attempt to inform user requested cpu flags aren't making
581 * their way to the guest. Note: ft[].check_feat ideally should be
582 * specified via a guest_def field to suppress report of extraneous flags.
584 static int check_features_against_host(x86_def_t *guest_def)
/* table pairing each guest feature word with the matching host word,
 * a mask of bits worth checking, its name table, and its CPUID leaf */
589 struct model_features_t ft[] = {
590 {&guest_def->features, &host_def.features,
591 ~0, feature_name, 0x00000000},
592 {&guest_def->ext_features, &host_def.ext_features,
593 ~CPUID_EXT_HYPERVISOR, ext_feature_name, 0x00000001},
594 {&guest_def->ext2_features, &host_def.ext2_features,
595 ~PPRO_FEATURES, ext2_feature_name, 0x80000000},
596 {&guest_def->ext3_features, &host_def.ext3_features,
597 ~CPUID_EXT3_SVM, ext3_feature_name, 0x80000001}};
599 cpu_x86_fill_host(&host_def);
/* warn once per guest bit that is checked, requested, and absent on host */
600 for (rv = 0, i = 0; i < ARRAY_SIZE(ft); ++i)
601 for (mask = 1; mask; mask <<= 1)
602 if (ft[i].check_feat & mask && *ft[i].guest_feat & mask &&
603 !(*ft[i].host_feat & mask)) {
604 unavailable_host_feature(&ft[i], mask);
/* QOM getter for "family": base family plus extended family from
 * the packed cpuid_version word. */
610 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
611 const char *name, Error **errp)
613 X86CPU *cpu = X86_CPU(obj);
614 CPUX86State *env = &cpu->env;
617 value = (env->cpuid_version >> 8) & 0xf;
619 value += (env->cpuid_version >> 20) & 0xff;
621 visit_type_int(v, &value, name, errp);
/* QOM setter for "family": values > 0x0f spill into the extended
 * family field (bits 20-27) with base family pinned at 0xf, per the
 * CPUID encoding. */
624 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
625 const char *name, Error **errp)
627 X86CPU *cpu = X86_CPU(obj);
628 CPUX86State *env = &cpu->env;
629 const int64_t min = 0;
630 const int64_t max = 0xff + 0xf;
633 visit_type_int(v, &value, name, errp);
634 if (error_is_set(errp)) {
637 if (value < min || value > max) {
638 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
639 name ? name : "null", value, min, max);
/* clear both base-family (bits 8-11) and ext-family (bits 20-27) */
643 env->cpuid_version &= ~0xff00f00;
645 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
647 env->cpuid_version |= value << 8;
/* QOM getter for "model": base model (bits 4-7) combined with
 * extended model (bits 16-19) of cpuid_version. */
651 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
652 const char *name, Error **errp)
654 X86CPU *cpu = X86_CPU(obj);
655 CPUX86State *env = &cpu->env;
658 value = (env->cpuid_version >> 4) & 0xf;
659 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
660 visit_type_int(v, &value, name, errp);
/* QOM setter for "model": low nibble goes to bits 4-7, high nibble
 * to the extended-model field at bits 16-19. */
663 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
664 const char *name, Error **errp)
666 X86CPU *cpu = X86_CPU(obj);
667 CPUX86State *env = &cpu->env;
668 const int64_t min = 0;
669 const int64_t max = 0xff;
672 visit_type_int(v, &value, name, errp);
673 if (error_is_set(errp)) {
676 if (value < min || value > max) {
677 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
678 name ? name : "null", value, min, max);
682 env->cpuid_version &= ~0xf00f0;
683 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
/* QOM getter for "stepping": low nibble of cpuid_version. */
686 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
687 void *opaque, const char *name,
690 X86CPU *cpu = X86_CPU(obj);
691 CPUX86State *env = &cpu->env;
694 value = env->cpuid_version & 0xf;
695 visit_type_int(v, &value, name, errp);
/* QOM setter for "stepping": range-checked to 0..0xf, stored in the
 * low nibble of cpuid_version. */
698 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
699 void *opaque, const char *name,
702 X86CPU *cpu = X86_CPU(obj);
703 CPUX86State *env = &cpu->env;
704 const int64_t min = 0;
705 const int64_t max = 0xf;
708 visit_type_int(v, &value, name, errp);
709 if (error_is_set(errp)) {
712 if (value < min || value > max) {
713 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
714 name ? name : "null", value, min, max);
718 env->cpuid_version &= ~0xf;
719 env->cpuid_version |= value & 0xf;
/* QOM getter for "level" (max basic CPUID leaf). */
722 static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
723 const char *name, Error **errp)
725 X86CPU *cpu = X86_CPU(obj);
728 value = cpu->env.cpuid_level;
729 /* TODO Use visit_type_uint32() once available */
730 visit_type_int(v, &value, name, errp);
/* QOM setter for "level", range-checked to fit uint32_t. */
733 static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
734 const char *name, Error **errp)
736 X86CPU *cpu = X86_CPU(obj);
737 const int64_t min = 0;
738 const int64_t max = UINT32_MAX;
741 /* TODO Use visit_type_uint32() once available */
742 visit_type_int(v, &value, name, errp);
743 if (error_is_set(errp)) {
746 if (value < min || value > max) {
747 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
748 name ? name : "null", value, min, max);
752 cpu->env.cpuid_level = value;
/* QOM getter for "xlevel" (max extended CPUID leaf, 0x80000000+). */
755 static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
756 const char *name, Error **errp)
758 X86CPU *cpu = X86_CPU(obj);
761 value = cpu->env.cpuid_xlevel;
762 /* TODO Use visit_type_uint32() once available */
763 visit_type_int(v, &value, name, errp);
/* QOM setter for "xlevel", range-checked to fit uint32_t. */
766 static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
767 const char *name, Error **errp)
769 X86CPU *cpu = X86_CPU(obj);
770 const int64_t min = 0;
771 const int64_t max = UINT32_MAX;
774 /* TODO Use visit_type_uint32() once available */
775 visit_type_int(v, &value, name, errp);
776 if (error_is_set(errp)) {
779 if (value < min || value > max) {
780 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
781 name ? name : "null", value, min, max);
785 cpu->env.cpuid_xlevel = value;
/* QOM getter for "vendor": unpack the three little-endian vendor
 * registers into a freshly allocated 12-character string (caller
 * frees).  NOTE(review): the NUL termination and return are elided
 * from this chunk. */
788 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
790 X86CPU *cpu = X86_CPU(obj);
791 CPUX86State *env = &cpu->env;
795 value = (char *)g_malloc(12 + 1);
796 for (i = 0; i < 4; i++) {
797 value[i ] = env->cpuid_vendor1 >> (8 * i);
798 value[i + 4] = env->cpuid_vendor2 >> (8 * i);
799 value[i + 8] = env->cpuid_vendor3 >> (8 * i);
/* QOM setter for "vendor": requires exactly 12 characters, packed
 * 4 bytes per register, little-endian, and marks the vendor as
 * explicitly overridden. */
805 static void x86_cpuid_set_vendor(Object *obj, const char *value,
808 X86CPU *cpu = X86_CPU(obj);
809 CPUX86State *env = &cpu->env;
812 if (strlen(value) != 12) {
813 error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
818 env->cpuid_vendor1 = 0;
819 env->cpuid_vendor2 = 0;
820 env->cpuid_vendor3 = 0;
821 for (i = 0; i < 4; i++) {
822 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
823 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
824 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
826 env->cpuid_vendor_override = 1;
/* QOM getter for "model-id": unpack the 48-byte brand string from the
 * cpuid_model word array into a new string (caller frees). */
829 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
831 X86CPU *cpu = X86_CPU(obj);
832 CPUX86State *env = &cpu->env;
836 value = g_malloc(48 + 1);
837 for (i = 0; i < 48; i++) {
/* byte i lives in word i/4, byte position i%4 */
838 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
/* QOM setter for "model-id": pack the string into the 48-byte
 * cpuid_model word array, space-padding beyond the string length
 * (the memset zeroes first; the elided branch presumably pads).
 * NOTE(review): lines handling i >= len are elided from this chunk. */
844 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
847 X86CPU *cpu = X86_CPU(obj);
848 CPUX86State *env = &cpu->env;
851 if (model_id == NULL) {
854 len = strlen(model_id);
855 memset(env->cpuid_model, 0, 48);
856 for (i = 0; i < 48; i++) {
860 c = (uint8_t)model_id[i];
862 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
/* QOM getter for "tsc-frequency": reported in Hz (tsc_khz * 1000). */
866 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
867 const char *name, Error **errp)
869 X86CPU *cpu = X86_CPU(obj);
872 value = cpu->env.tsc_khz * 1000;
873 visit_type_int(v, &value, name, errp);
/* QOM setter for "tsc-frequency": accepts Hz, stores kHz (truncating
 * any remainder below 1 kHz). */
876 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
877 const char *name, Error **errp)
879 X86CPU *cpu = X86_CPU(obj);
880 const int64_t min = 0;
881 const int64_t max = INT_MAX;
884 visit_type_int(v, &value, name, errp);
885 if (error_is_set(errp)) {
888 if (value < min || value > max) {
889 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
890 name ? name : "null", value, min, max);
894 cpu->env.tsc_khz = value / 1000;
/* Parse a "-cpu model,feat1,+feat2,-feat3,key=val,..." string: look up
 * the named model in x86_defs (or fill from host CPUID for "host"
 * under KVM), then apply per-flag additions/removals and key=value
 * overrides.  Returns 0 on success, presumably negative on error
 * (error-return lines are elided from this chunk).
 * NOTE(review): strtok() mutates and keeps static state; fine here
 * only because parsing is single-threaded at startup. */
897 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
902 char *s = g_strdup(cpu_model);
903 char *featurestr, *name = strtok(s, ",");
904 /* Features to be added*/
905 uint32_t plus_features = 0, plus_ext_features = 0;
906 uint32_t plus_ext2_features = 0, plus_ext3_features = 0;
907 uint32_t plus_kvm_features = 0, plus_svm_features = 0;
908 /* Features to be removed */
909 uint32_t minus_features = 0, minus_ext_features = 0;
910 uint32_t minus_ext2_features = 0, minus_ext3_features = 0;
911 uint32_t minus_kvm_features = 0, minus_svm_features = 0;
/* find the named model in the registered-definition list */
914 for (def = x86_defs; def; def = def->next)
915 if (name && !strcmp(name, def->name))
917 if (kvm_enabled() && name && strcmp(name, "host") == 0) {
918 cpu_x86_fill_host(x86_cpu_def);
922 memcpy(x86_cpu_def, def, sizeof(*def));
925 plus_kvm_features = ~0; /* not supported bits will be filtered out later */
/* the "hypervisor" flag is always advertised to guests */
927 add_flagname_to_bitmaps("hypervisor", &plus_features,
928 &plus_ext_features, &plus_ext2_features, &plus_ext3_features,
929 &plus_kvm_features, &plus_svm_features);
931 featurestr = strtok(NULL, ",");
/* "+flag" requests a feature, "-flag" removes one, "key=val" sets a
 * scalar property, bare words are mode switches (check/enforce/...) */
935 if (featurestr[0] == '+') {
936 add_flagname_to_bitmaps(featurestr + 1, &plus_features,
937 &plus_ext_features, &plus_ext2_features,
938 &plus_ext3_features, &plus_kvm_features,
940 } else if (featurestr[0] == '-') {
941 add_flagname_to_bitmaps(featurestr + 1, &minus_features,
942 &minus_ext_features, &minus_ext2_features,
943 &minus_ext3_features, &minus_kvm_features,
944 &minus_svm_features);
945 } else if ((val = strchr(featurestr, '='))) {
947 if (!strcmp(featurestr, "family")) {
949 numvalue = strtoul(val, &err, 0);
950 if (!*val || *err || numvalue > 0xff + 0xf) {
951 fprintf(stderr, "bad numerical value %s\n", val);
954 x86_cpu_def->family = numvalue;
955 } else if (!strcmp(featurestr, "model")) {
957 numvalue = strtoul(val, &err, 0);
958 if (!*val || *err || numvalue > 0xff) {
959 fprintf(stderr, "bad numerical value %s\n", val);
962 x86_cpu_def->model = numvalue;
963 } else if (!strcmp(featurestr, "stepping")) {
965 numvalue = strtoul(val, &err, 0);
966 if (!*val || *err || numvalue > 0xf) {
967 fprintf(stderr, "bad numerical value %s\n", val);
970 x86_cpu_def->stepping = numvalue ;
971 } else if (!strcmp(featurestr, "level")) {
973 numvalue = strtoul(val, &err, 0);
975 fprintf(stderr, "bad numerical value %s\n", val);
978 x86_cpu_def->level = numvalue;
979 } else if (!strcmp(featurestr, "xlevel")) {
981 numvalue = strtoul(val, &err, 0);
983 fprintf(stderr, "bad numerical value %s\n", val);
/* extended leaves live at 0x80000000+; accept a bare offset too */
986 if (numvalue < 0x80000000) {
987 numvalue += 0x80000000;
989 x86_cpu_def->xlevel = numvalue;
990 } else if (!strcmp(featurestr, "vendor")) {
991 if (strlen(val) != 12) {
992 fprintf(stderr, "vendor string must be 12 chars long\n");
995 x86_cpu_def->vendor1 = 0;
996 x86_cpu_def->vendor2 = 0;
997 x86_cpu_def->vendor3 = 0;
998 for(i = 0; i < 4; i++) {
999 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
1000 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
1001 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
1003 x86_cpu_def->vendor_override = 1;
1004 } else if (!strcmp(featurestr, "model_id")) {
1005 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
1007 } else if (!strcmp(featurestr, "tsc_freq")) {
/* accepts size suffixes (K/M/G), base unit 1000 => value in Hz */
1011 tsc_freq = strtosz_suffix_unit(val, &err,
1012 STRTOSZ_DEFSUFFIX_B, 1000);
1013 if (tsc_freq < 0 || *err) {
1014 fprintf(stderr, "bad numerical value %s\n", val);
1017 x86_cpu_def->tsc_khz = tsc_freq / 1000;
1018 } else if (!strcmp(featurestr, "hv_spinlocks")) {
1020 numvalue = strtoul(val, &err, 0);
1021 if (!*val || *err) {
1022 fprintf(stderr, "bad numerical value %s\n", val);
1025 hyperv_set_spinlock_retries(numvalue);
1027 fprintf(stderr, "unrecognized feature %s\n", featurestr);
1030 } else if (!strcmp(featurestr, "check")) {
1032 } else if (!strcmp(featurestr, "enforce")) {
1033 check_cpuid = enforce_cpuid = 1;
1034 } else if (!strcmp(featurestr, "hv_relaxed")) {
1035 hyperv_enable_relaxed_timing(true);
1036 } else if (!strcmp(featurestr, "hv_vapic")) {
1037 hyperv_enable_vapic_recommended(true);
1039 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
1042 featurestr = strtok(NULL, ",");
/* apply accumulated +flags first, then strip -flags (so "-x" wins
 * over "+x" for the same flag) */
1044 x86_cpu_def->features |= plus_features;
1045 x86_cpu_def->ext_features |= plus_ext_features;
1046 x86_cpu_def->ext2_features |= plus_ext2_features;
1047 x86_cpu_def->ext3_features |= plus_ext3_features;
1048 x86_cpu_def->kvm_features |= plus_kvm_features;
1049 x86_cpu_def->svm_features |= plus_svm_features;
1050 x86_cpu_def->features &= ~minus_features;
1051 x86_cpu_def->ext_features &= ~minus_ext_features;
1052 x86_cpu_def->ext2_features &= ~minus_ext2_features;
1053 x86_cpu_def->ext3_features &= ~minus_ext3_features;
1054 x86_cpu_def->kvm_features &= ~minus_kvm_features;
1055 x86_cpu_def->svm_features &= ~minus_svm_features;
1057 if (check_features_against_host(x86_cpu_def) && enforce_cpuid)
1068 /* generate a composite string into buf of all cpuid names in featureset
1069 * selected by fbits. indicate truncation at bufsize in the event of overflow.
1070 * if flags, suppress names undefined in featureset.
/* NOTE(review): the snprintf-selection branch structure and the
 * pointer-advance lines are elided from this chunk; 'b' reserves room
 * for a trailing "..." truncation marker. */
1072 static void listflags(char *buf, int bufsize, uint32_t fbits,
1073 const char **featureset, uint32_t flags)
1075 const char **p = &featureset[31];
1079 b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL;
/* walk bits 31..0, clearing each handled bit so the loop can stop
 * early once fbits is exhausted */
1081 for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1 << bit), --bit)
1082 if (fbits & 1 << bit && (*p || !flags)) {
1084 nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p);
/* unnamed bits print as their bit number, e.g. "[13]" */
1086 nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit);
1087 if (bufsize <= nc) {
1089 memcpy(b, "...", sizeof("..."));
1098 /* generate CPU information:
1099 * -? list model names
1100 * -?model list model names/IDs
1101 * -?dump output all model (x86_def_t) data
1102 * -?cpuid list all recognized cpuid flag names
1104 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
1106 unsigned char model = !strcmp("?model", optarg);
1107 unsigned char dump = !strcmp("?dump", optarg);
1108 unsigned char cpuid = !strcmp("?cpuid", optarg);
/* "?cpuid": print every recognized flag name per register and return */
1113 (*cpu_fprintf)(f, "Recognized CPUID flags:\n");
1114 listflags(buf, sizeof (buf), (uint32_t)~0, feature_name, 1);
1115 (*cpu_fprintf)(f, " f_edx: %s\n", buf);
1116 listflags(buf, sizeof (buf), (uint32_t)~0, ext_feature_name, 1);
1117 (*cpu_fprintf)(f, " f_ecx: %s\n", buf);
1118 listflags(buf, sizeof (buf), (uint32_t)~0, ext2_feature_name, 1);
1119 (*cpu_fprintf)(f, " extf_edx: %s\n", buf);
1120 listflags(buf, sizeof (buf), (uint32_t)~0, ext3_feature_name, 1);
1121 (*cpu_fprintf)(f, " extf_ecx: %s\n", buf);
/* otherwise iterate registered models */
1124 for (def = x86_defs; def; def = def->next) {
1125 snprintf(buf, sizeof (buf), def->flags ? "[%s]": "%s", def->name);
1126 if (model || dump) {
1127 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1129 (*cpu_fprintf)(f, "x86 %16s\n", buf);
/* "?dump": show vendor string, version info and all feature words */
1132 memcpy(buf, &def->vendor1, sizeof (def->vendor1));
1133 memcpy(buf + 4, &def->vendor2, sizeof (def->vendor2));
1134 memcpy(buf + 8, &def->vendor3, sizeof (def->vendor3));
1137 " family %d model %d stepping %d level %d xlevel 0x%x"
1139 def->family, def->model, def->stepping, def->level,
1141 listflags(buf, sizeof (buf), def->features, feature_name, 0);
1142 (*cpu_fprintf)(f, " feature_edx %08x (%s)\n", def->features,
1144 listflags(buf, sizeof (buf), def->ext_features, ext_feature_name,
1146 (*cpu_fprintf)(f, " feature_ecx %08x (%s)\n", def->ext_features,
1148 listflags(buf, sizeof (buf), def->ext2_features, ext2_feature_name,
1150 (*cpu_fprintf)(f, " extfeature_edx %08x (%s)\n",
1151 def->ext2_features, buf);
1152 listflags(buf, sizeof (buf), def->ext3_features, ext3_feature_name,
1154 (*cpu_fprintf)(f, " extfeature_ecx %08x (%s)\n",
1155 def->ext3_features, buf);
1156 (*cpu_fprintf)(f, "\n");
/* "-cpu host" is only offered under KVM */
1159 if (kvm_enabled()) {
1160 (*cpu_fprintf)(f, "x86 %16s\n", "[host]");
/* Resolve cpu_model into a definition and copy it into the CPU's
 * CPUX86State / QOM properties.  Returns 0 on success, presumably -1
 * on parse failure (error paths are elided from this chunk). */
1164 int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
1166 CPUX86State *env = &cpu->env;
1167 x86_def_t def1, *def = &def1;
1168 Error *error = NULL;
1170 memset(def, 0, sizeof(*def));
1172 if (cpu_x86_find_by_name(def, cpu_model) < 0)
1175 env->cpuid_vendor1 = def->vendor1;
1176 env->cpuid_vendor2 = def->vendor2;
1177 env->cpuid_vendor3 = def->vendor3;
/* definitions without an explicit vendor default to Intel */
1179 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
1180 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
1181 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
1183 env->cpuid_vendor_override = def->vendor_override;
1184 object_property_set_int(OBJECT(cpu), def->level, "level", &error);
1185 object_property_set_int(OBJECT(cpu), def->family, "family", &error);
1186 object_property_set_int(OBJECT(cpu), def->model, "model", &error);
1187 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", &error);
1188 env->cpuid_features = def->features;
1189 env->cpuid_ext_features = def->ext_features;
1190 env->cpuid_ext2_features = def->ext2_features;
1191 env->cpuid_ext3_features = def->ext3_features;
1192 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", &error);
1193 env->cpuid_kvm_features = def->kvm_features;
1194 env->cpuid_svm_features = def->svm_features;
1195 env->cpuid_ext4_features = def->ext4_features;
1196 env->cpuid_7_0_ebx = def->cpuid_7_0_ebx_features;
1197 env->cpuid_xlevel2 = def->xlevel2;
1198 object_property_set_int(OBJECT(cpu), (int64_t)def->tsc_khz * 1000,
1199 "tsc-frequency", &error);
/* without KVM, clamp features to what the TCG emulator implements */
1200 if (!kvm_enabled()) {
1201 env->cpuid_features &= TCG_FEATURES;
1202 env->cpuid_ext_features &= TCG_EXT_FEATURES;
1203 env->cpuid_ext2_features &= (TCG_EXT2_FEATURES
1204 #ifdef TARGET_X86_64
1205 | CPUID_EXT2_SYSCALL | CPUID_EXT2_LM
1208 env->cpuid_ext3_features &= TCG_EXT3_FEATURES;
1209 env->cpuid_svm_features &= TCG_SVM_FEATURES;
1211 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", &error);
1212 if (error_is_set(&error)) {
1219 #if !defined(CONFIG_USER_ONLY)
/* copy vendor id string to 32 bit register, nul pad as needed
 */
static void cpyid(const char *s, uint32_t *id)
{
    char *dst = (char *)id;
    int n;

    /* Fill every byte of the register: take characters from s while
     * they last, then pad the remainder with NUL bytes.  s is never
     * advanced past its terminator. */
    for (n = 0; n < (int)sizeof(*id); n++) {
        *dst++ = *s ? *s++ : '\0';
    }
}
/* interpret radix and convert from string to arbitrary scalar,
 * otherwise flag failure
 * (kept as a brace block, not do/while(0): existing callers invoke it
 * without a trailing semicolon)
 */
#define setscalar(pval, str, perr)                      \
{                                                       \
    char *pend_;                                        \
    unsigned long ul_ = strtoul(str, &pend_, 0);        \
                                                        \
    /* accept only a non-empty, fully consumed number */\
    if (*str && !*pend_) {                              \
        *pval = ul_;                                    \
    } else {                                            \
        *perr = 1;                                      \
    }                                                   \
}
1243 /* map cpuid options to feature bits, otherwise return failure
1244 * (option tags in *str are delimited by whitespace)
1246 static void setfeatures(uint32_t *pval, const char *str,
1247 const char **featureset, int *perr)
/* Tokenize str: p scans forward, q marks the start of the current
 * token.  Each token is resolved via lookup_feature() and ORed into
 * *pval.  NOTE(review): the iswhite()-skipping loop, the *perr = 1
 * assignment, and the closing braces sit in elided lines of this
 * extract. */
1251 for (q = p = str; *p || *q; q = p) {
/* advance p to the end of the current whitespace-delimited token */
1254 while (*p && !iswhite(*p))
/* unknown token: report it (length p - q, starting at q) and fail */
1258 if (!lookup_feature(pval, q, p, featureset)) {
1259 fprintf(stderr, "error: feature \"%.*s\" not available in set\n",
1267 /* map config file options to x86_def_t form
1269 static int cpudef_setfield(const char *name, const char *str, void *opaque)
1271 x86_def_t *def = opaque;
1274 if (!strcmp(name, "name")) {
1275 g_free((void *)def->name);
1276 def->name = g_strdup(str);
1277 } else if (!strcmp(name, "model_id")) {
1278 strncpy(def->model_id, str, sizeof (def->model_id));
1279 } else if (!strcmp(name, "level")) {
1280 setscalar(&def->level, str, &err)
1281 } else if (!strcmp(name, "vendor")) {
1282 cpyid(&str[0], &def->vendor1);
1283 cpyid(&str[4], &def->vendor2);
1284 cpyid(&str[8], &def->vendor3);
1285 } else if (!strcmp(name, "family")) {
1286 setscalar(&def->family, str, &err)
1287 } else if (!strcmp(name, "model")) {
1288 setscalar(&def->model, str, &err)
1289 } else if (!strcmp(name, "stepping")) {
1290 setscalar(&def->stepping, str, &err)
1291 } else if (!strcmp(name, "feature_edx")) {
1292 setfeatures(&def->features, str, feature_name, &err);
1293 } else if (!strcmp(name, "feature_ecx")) {
1294 setfeatures(&def->ext_features, str, ext_feature_name, &err);
1295 } else if (!strcmp(name, "extfeature_edx")) {
1296 setfeatures(&def->ext2_features, str, ext2_feature_name, &err);
1297 } else if (!strcmp(name, "extfeature_ecx")) {
1298 setfeatures(&def->ext3_features, str, ext3_feature_name, &err);
1299 } else if (!strcmp(name, "xlevel")) {
1300 setscalar(&def->xlevel, str, &err)
1302 fprintf(stderr, "error: unknown option [%s = %s]\n", name, str);
1306 fprintf(stderr, "error: bad option value [%s = %s]\n", name, str);
1312 /* register config file entry as x86_def_t
1314 static int cpudef_register(QemuOpts *opts, void *opaque)
/* Allocate a zeroed definition and fill it from the option group. */
1316 x86_def_t *def = g_malloc0(sizeof (x86_def_t));
1318 qemu_opt_foreach(opts, cpudef_setfield, def, 1);
/* Link the new definition at the head of the global x86_defs list.
 * NOTE(review): the "x86_defs = def;" assignment and the return sit
 * in elided lines of this extract -- confirm against the full file. */
1319 def->next = x86_defs;
/* Strip the APIC bit from the CPUID leaf-1 EDX feature word, for
 * configurations that provide no APIC. */
1324 void cpu_clear_apic_feature(CPUX86State *env)
1326 env->cpuid_features &= ~CPUID_APIC;
1329 #endif /* !CONFIG_USER_ONLY */
1331 /* register "cpudef" models defined in configuration file. Here we first
1332 * preload any built-in definitions
1334 void x86_cpudef_setup(void)
/* Push every builtin_x86_defs entry onto the global x86_defs list.
 * flags = 1 presumably tags the entry as a built-in (as opposed to a
 * config-defined model) -- confirm against the full source. */
1338 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
1339 builtin_x86_defs[i].next = x86_defs;
1340 builtin_x86_defs[i].flags = 1;
1341 x86_defs = &builtin_x86_defs[i];
/* Then register models from -cpudef config sections (system
 * emulation only; cpudef_register is compiled out for user mode). */
1343 #if !defined(CONFIG_USER_ONLY)
1344 qemu_opts_foreach(qemu_find_opts("cpudef"), cpudef_register, NULL, 0);
/* Store the guest-visible CPUID vendor id words into *ebx/*edx/*ecx --
 * the register order in which CPUID leaf 0 reports the vendor string. */
1348 static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
1349 uint32_t *ecx, uint32_t *edx)
1351 *ebx = env->cpuid_vendor1;
1352 *edx = env->cpuid_vendor2;
1353 *ecx = env->cpuid_vendor3;
1355 /* sysenter isn't supported on compatibility mode on AMD, syscall
1356 * isn't supported in compatibility mode on Intel.
1357 * Normally we advertise the actual cpu vendor, but you can override
1358 * this if you want to use KVM's sysenter/syscall emulation
1359 * in compatibility mode and when doing cross vendor migration
/* With KVM and no explicit override, report the host CPU's real
 * vendor instead of the configured one. */
1361 if (kvm_enabled() && ! env->cpuid_vendor_override) {
1362 host_cpuid(0, 0, NULL, ebx, ecx, edx);
/*
 * cpu_x86_cpuid: compute the guest-visible CPUID result for leaf
 * @index / sub-leaf @count and store it into *eax..*edx.  The body is
 * one large switch over the (clamped) leaf number.
 *
 * NOTE(review): this listing is elided -- the switch statement, all
 * case labels, break statements, and several leaf bodies fall in
 * missing lines.  The leaf attributions in the comments below are
 * inferred from the values stored; confirm against the full source.
 */
1366 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
1367 uint32_t *eax, uint32_t *ebx,
1368 uint32_t *ecx, uint32_t *edx)
1370 /* test if maximum index reached */
1371 if (index & 0x80000000) {
/* Extended range (0x8xxxxxxx): clamp to cpuid_xlevel, with a special
 * case for the Centaur 0xCxxxxxxx range when xlevel2 is set. */
1372 if (index > env->cpuid_xlevel) {
1373 if (env->cpuid_xlevel2 > 0) {
1374 /* Handle the Centaur's CPUID instruction. */
1375 if (index > env->cpuid_xlevel2) {
1376 index = env->cpuid_xlevel2;
1377 } else if (index < 0xC0000000) {
1378 index = env->cpuid_xlevel;
1381 index = env->cpuid_xlevel;
/* Basic range: clamp to cpuid_level. */
1385 if (index > env->cpuid_level)
1386 index = env->cpuid_level;
/* leaf 0: maximum basic leaf + vendor id string */
1391 *eax = env->cpuid_level;
1392 get_cpuid_vendor(env, ebx, ecx, edx);
/* leaf 1: version/signature, brand+CLFLUSH+APIC id, feature words */
1395 *eax = env->cpuid_version;
1396 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1397 *ecx = env->cpuid_ext_features;
1398 *edx = env->cpuid_features;
/* advertise HTT + logical processor count when SMP is configured */
1399 if (env->nr_cores * env->nr_threads > 1) {
1400 *ebx |= (env->nr_cores * env->nr_threads) << 16;
1401 *edx |= 1 << 28; /* HTT bit */
1405 /* cache info: needed for Pentium Pro compatibility */
1412 /* cache info: needed for Core compatibility */
1413 if (env->nr_cores > 1) {
1414 *eax = (env->nr_cores - 1) << 26;
1419 case 0: /* L1 dcache info */
1425 case 1: /* L1 icache info */
1431 case 2: /* L2 cache info */
/* L2 is shared between the threads of a core */
1433 if (env->nr_threads > 1) {
1434 *eax |= (env->nr_threads - 1) << 14;
1440 default: /* end of info */
1449 /* mwait info: needed for Core compatibility */
1450 *eax = 0; /* Smallest monitor-line size in bytes */
1451 *ebx = 0; /* Largest monitor-line size in bytes */
1452 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
1456 /* Thermal and Power Leaf */
1463 /* Structured Extended Feature Flags Enumeration Leaf */
1465 *eax = 0; /* Maximum ECX value for sub-leaves */
1466 *ebx = env->cpuid_7_0_ebx; /* Feature flags */
1467 *ecx = 0; /* Reserved */
1468 *edx = 0; /* Reserved */
1477 /* Direct Cache Access Information Leaf */
1478 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
1484 /* Architectural Performance Monitoring Leaf */
/* under KVM, forward whatever PMU the kernel supports */
1485 if (kvm_enabled()) {
1486 KVMState *s = env->kvm_state;
1488 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
1489 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
1490 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
1491 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
1500 /* Processor Extended State */
/* leaf 0xD is only meaningful when XSAVE is advertised */
1501 if (!(env->cpuid_ext_features & CPUID_EXT_XSAVE)) {
1508 if (kvm_enabled()) {
1509 KVMState *s = env->kvm_state;
1511 *eax = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EAX);
1512 *ebx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EBX);
1513 *ecx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_ECX);
1514 *edx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EDX);
/* leaf 0x80000000: maximum extended leaf + vendor id */
1523 *eax = env->cpuid_xlevel;
1524 *ebx = env->cpuid_vendor1;
1525 *edx = env->cpuid_vendor2;
1526 *ecx = env->cpuid_vendor3;
/* leaf 0x80000001: extended version + ext2/ext3 feature words */
1529 *eax = env->cpuid_version;
1531 *ecx = env->cpuid_ext3_features;
1532 *edx = env->cpuid_ext2_features;
1534 /* The Linux kernel checks for the CMPLegacy bit and
1535 * discards multiple thread information if it is set.
1536 * So dont set it here for Intel to make Linux guests happy.
1538 if (env->nr_cores * env->nr_threads > 1) {
1539 uint32_t tebx, tecx, tedx;
1540 get_cpuid_vendor(env, &tebx, &tecx, &tedx);
1541 if (tebx != CPUID_VENDOR_INTEL_1 ||
1542 tedx != CPUID_VENDOR_INTEL_2 ||
1543 tecx != CPUID_VENDOR_INTEL_3) {
1544 *ecx |= 1 << 1; /* CmpLegacy bit */
/* leaves 0x80000002..0x80000004: processor brand string, 16 bytes
 * per leaf out of cpuid_model[] */
1551 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1552 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1553 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1554 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1557 /* cache info (L1 cache) */
1564 /* cache info (L2 cache) */
/* leaf 0x80000008: physical/virtual address sizes */
1571 /* virtual & phys address size in low 2 bytes. */
1572 /* XXX: This value must match the one used in the MMU code. */
1573 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
1574 /* 64 bit processor */
1575 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1576 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
1578 if (env->cpuid_features & CPUID_PSE36)
1579 *eax = 0x00000024; /* 36 bits physical */
1581 *eax = 0x00000020; /* 32 bits physical */
1586 if (env->nr_cores * env->nr_threads > 1) {
1587 *ecx |= (env->nr_cores * env->nr_threads) - 1;
/* leaf 0x8000000A: SVM revision/ASIDs/features, only when SVM is
 * advertised in ext3 */
1591 if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
1592 *eax = 0x00000001; /* SVM Revision */
1593 *ebx = 0x00000010; /* nr of ASIDs */
1595 *edx = env->cpuid_svm_features; /* optional features */
/* leaf 0xC0000000: maximum Centaur leaf */
1604 *eax = env->cpuid_xlevel2;
1610 /* Support for VIA CPU's CPUID instruction */
1611 *eax = env->cpuid_version;
1614 *edx = env->cpuid_ext4_features;
1619 /* Reserved for the future, and now filled with zero */
1626 /* reserved values: zero */
1635 /* CPUClass::reset() */
/* Bring the CPU to its architectural power-on/reset state.
 * NOTE(review): elided extract -- braces, #else/#endif lines, the
 * register-zeroing loop body and a few assignments are missing. */
1636 static void x86_cpu_reset(CPUState *s)
1638 X86CPU *cpu = X86_CPU(s);
1639 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
1640 CPUX86State *env = &cpu->env;
/* optional debug dump of pre-reset state */
1643 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
1644 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
1645 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
/* chain to the parent class reset saved in class_init */
1648 xcc->parent_reset(s);
/* Zero the state up to (not including) the breakpoints field; fields
 * after that point survive reset. */
1651 memset(env, 0, offsetof(CPUX86State, breakpoints));
1655 env->old_exception = -1;
1657 /* init to reset state */
1659 #ifdef CONFIG_SOFTMMU
1660 env->hflags |= HF_SOFTMMU_MASK;
1662 env->hflags2 |= HF2_GIF_MASK;
/* CR0 reset value 0x60000010 (CD | NW | ET per the x86 reset spec) */
1664 cpu_x86_update_cr0(env, 0x60000010);
1665 env->a20_mask = ~0x0;
1666 env->smbase = 0x30000;
1668 env->idt.limit = 0xffff;
1669 env->gdt.limit = 0xffff;
1670 env->ldt.limit = 0xffff;
1671 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
1672 env->tr.limit = 0xffff;
1673 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
/* Real-mode segments.  CS selector 0xf000 with base 0xffff0000 makes
 * the reset vector f000:fff0 hit the top of the 4G address space. */
1675 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
1676 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
1677 DESC_R_MASK | DESC_A_MASK);
1678 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
1679 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1681 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
1682 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1684 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
1685 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1687 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
1688 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1690 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
1691 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
/* EDX holds the CPU signature after reset */
1695 env->regs[R_EDX] = env->cpuid_version;
1700 for (i = 0; i < 8; i++) {
/* MXCSR reset value: all exceptions masked */
1705 env->mxcsr = 0x1f80;
1707 env->pat = 0x0007040600070406ULL;
1708 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
/* debug registers: clear, then apply the architecturally fixed bits */
1710 memset(env->dr, 0, sizeof(env->dr));
1711 env->dr[6] = DR6_FIXED_1;
1712 env->dr[7] = DR7_FIXED_1;
1713 cpu_breakpoint_remove_all(env, BP_CPU);
1714 cpu_watchpoint_remove_all(env, BP_CPU);
/* Enable machine-check state when the configured CPU model is
 * family >= 6 and advertises both CPUID_MCE and CPUID_MCA. */
1717 static void mce_init(X86CPU *cpu)
1719 CPUX86State *cenv = &cpu->env;
/* (cpuid_version >> 8) & 0xf extracts the family field of leaf-1 EAX */
1722 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
1723 && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
1724 (CPUID_MCE | CPUID_MCA)) {
1725 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
/* all-ones: every control bit enabled */
1726 cenv->mcg_ctl = ~(uint64_t)0;
/* banks are 4 MSRs apiece; set each bank's CTL MSR to all-ones */
1727 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
1728 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
/* Realize the CPU object: finish construction and start the vcpu.
 * NOTE(review): this extract is elided -- upstream calls mce_init(cpu)
 * between the lines shown here; confirm against the full source. */
1733 void x86_cpu_realize(Object *obj, Error **errp)
1735 X86CPU *cpu = X86_CPU(obj);
1738 qemu_init_vcpu(&cpu->env);
/* Instance init: register the QOM properties that expose CPUID state
 * ("family", "model", "stepping", "level", "xlevel", "vendor",
 * "model-id", "tsc-frequency") and derive the initial APIC id. */
1741 static void x86_cpu_initfn(Object *obj)
1743 X86CPU *cpu = X86_CPU(obj);
1744 CPUX86State *env = &cpu->env;
/* integer properties backed by getter/setter pairs */
1748 object_property_add(obj, "family", "int",
1749 x86_cpuid_version_get_family,
1750 x86_cpuid_version_set_family, NULL, NULL, NULL);
1751 object_property_add(obj, "model", "int",
1752 x86_cpuid_version_get_model,
1753 x86_cpuid_version_set_model, NULL, NULL, NULL);
1754 object_property_add(obj, "stepping", "int",
1755 x86_cpuid_version_get_stepping,
1756 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
1757 object_property_add(obj, "level", "int",
1758 x86_cpuid_get_level,
1759 x86_cpuid_set_level, NULL, NULL, NULL);
1760 object_property_add(obj, "xlevel", "int",
1761 x86_cpuid_get_xlevel,
1762 x86_cpuid_set_xlevel, NULL, NULL, NULL);
/* string properties */
1763 object_property_add_str(obj, "vendor",
1764 x86_cpuid_get_vendor,
1765 x86_cpuid_set_vendor, NULL);
1766 object_property_add_str(obj, "model-id",
1767 x86_cpuid_get_model_id,
1768 x86_cpuid_set_model_id, NULL);
1769 object_property_add(obj, "tsc-frequency", "int",
1770 x86_cpuid_get_tsc_freq,
1771 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
/* default APIC id follows the cpu index */
1773 env->cpuid_apic_id = env->cpu_index;
/* Class init: install x86_cpu_reset as CPUClass::reset, saving the
 * parent implementation so x86_cpu_reset() can chain to it. */
1776 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
1778 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1779 CPUClass *cc = CPU_CLASS(oc);
1781 xcc->parent_reset = cc->reset;
1782 cc->reset = x86_cpu_reset;
/* QOM type registration record for TYPE_X86_CPU.
 * NOTE(review): the .parent field line is elided in this extract. */
1785 static const TypeInfo x86_cpu_type_info = {
1786 .name = TYPE_X86_CPU,
1788 .instance_size = sizeof(X86CPU),
1789 .instance_init = x86_cpu_initfn,
1791 .class_size = sizeof(X86CPUClass),
1792 .class_init = x86_cpu_common_class_init,
/* Register the x86 CPU QOM type; run via type_init() at module load. */
1795 static void x86_cpu_register_types(void)
1797 type_register_static(&x86_cpu_type_info);
1800 type_init(x86_cpu_register_types)