2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
29 #include "qemu/error-report.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #ifndef CONFIG_USER_ONLY
47 #include "exec/address-spaces.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
53 /* Cache topology CPUID constants: */
55 /* CPUID Leaf 2 Descriptors */
/* One-byte cache descriptors reported in CPUID leaf 2. */
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
62 /* CPUID Leaf 4 constants: */
/* EAX: cache type field */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
/* EAX: cache level field starts at bit 5 */
69 #define CPUID_4_LEVEL(l) ((l) << 5)
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
/* EDX: cache property flags */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
/* Raw associativity value meaning "fully associative" */
79 #define ASSOC_FULL 0xFF
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
/* NOTE(review): the intermediate power-of-two arms of this conditional chain
 * (original lines 83-91) are not visible in this extract; only the a <= 1
 * pass-through arm and the fully-associative arm remain. */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
96 /* Definitions of the hardcoded cache entries we expose: */
/* L1 data cache (values used for CPUID leaves 2 and 4): */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
/* No L3 cache is advertised (all fields zero): */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
140 /* TLB definitions: */
/* L1 data TLB; an associativity/entry count of 0 marks the level disabled */
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
/* L1 instruction TLB */
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
/* L2 data TLB */
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
/* L2 instruction TLB */
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
/* Decode the three 32-bit CPUID vendor registers into the 12-character
 * ASCII vendor string at dst (plus a terminating NUL); each word contributes
 * 4 bytes, least-significant byte first. dst must have room for
 * CPUID_VENDOR_SZ + 1 bytes. */
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
173 dst[CPUID_VENDOR_SZ] = '\0';
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
 */
/* CPUID[1].EDX feature names; array index == bit position, NULL == unnamed bit. */
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
/* CPUID[1].ECX feature names; "a|b" entries are accepted aliases. */
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
200 /* Feature names that are already defined on feature_name[] but are set on
201 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
202 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203 * if and only if CPU vendor is AMD.
 */
/* CPUID[8000_0001].EDX feature names. */
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
/* CPUID[8000_0001].ECX feature names. */
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
/* CPUID[C000_0001].EDX feature names (VIA/Centaur PadLock features). */
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
/* KVM paravirtual feature names (CPUID[KVM_CPUID_FEATURES].EAX).
 * "kvmclock" appears twice on purpose: bits 0 and 3 are two revisions of
 * the kvmclock interface and share one user-visible flag name. */
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
/* SVM (AMD virtualization) feature names, CPUID[8000_000A].EDX. */
248 static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
/* CPUID[EAX=7,ECX=0].EBX feature names. */
259 static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
262 "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
263 "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
/* CPUID[EAX=7,ECX=0].ECX feature names. */
266 static const char *cpuid_7_0_ecx_feature_name[] = {
267 NULL, NULL, NULL, "pku",
268 "ospke", NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
/* CPUID[8000_0007].EDX (Advanced Power Management) feature names. */
277 static const char *cpuid_apm_edx_feature_name[] = {
278 NULL, NULL, NULL, NULL,
279 NULL, NULL, NULL, NULL,
280 "invtsc", NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
/* XSAVE sub-leaf feature names (CPUID leaf with ECX=1, see
 * feature_word_info[FEAT_XSAVE] below). */
288 static const char *cpuid_xsave_feature_name[] = {
289 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 NULL, NULL, NULL, NULL,
/* CPUID[6].EAX (thermal/power management) feature names. */
299 static const char *cpuid_6_feature_name[] = {
300 NULL, NULL, "arat", NULL,
301 NULL, NULL, NULL, NULL,
302 NULL, NULL, NULL, NULL,
303 NULL, NULL, NULL, NULL,
304 NULL, NULL, NULL, NULL,
305 NULL, NULL, NULL, NULL,
306 NULL, NULL, NULL, NULL,
307 NULL, NULL, NULL, NULL,
/* Shorthand feature masks for the builtin CPU models below: */
310 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
311 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
312 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
313 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
314 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
315 CPUID_PSE36 | CPUID_FXSR)
316 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
317 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
318 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
319 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
320 CPUID_PAE | CPUID_SEP | CPUID_APIC)
/* Per-feature-word masks of the bits the TCG emulator can actually provide;
 * the trailing comment fragments below list bits that are only partly
 * implemented or missing (some comment delimiters were lost in this extract). */
322 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
323 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
324 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
325 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
326 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
327 /* partly implemented:
328 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
330 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
331 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
332 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
333 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
334 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
335 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
337 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
338 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
339 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
340 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
341 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
/* NOTE(review): the two alternative TCG_EXT2_X86_64_FEATURES definitions
 * below were presumably selected by a TARGET_X86_64 #ifdef/#else pair that
 * is not visible in this extract -- confirm against the full source. */
344 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
346 #define TCG_EXT2_X86_64_FEATURES 0
349 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
350 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
351 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
352 TCG_EXT2_X86_64_FEATURES)
353 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
354 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
355 #define TCG_EXT4_FEATURES 0
356 #define TCG_SVM_FEATURES 0
357 #define TCG_KVM_FEATURES 0
358 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
359 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
360 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
361 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX)
363 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
364 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
365 CPUID_7_0_EBX_RDSEED */
366 #define TCG_7_0_ECX_FEATURES 0
367 #define TCG_APM_FEATURES 0
368 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
369 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
371 CPUID_XSAVE_XSAVES */
/* Per-feature-word metadata: the name table for its 32 bits, the CPUID
 * leaf (and optional sub-leaf) and output register it comes from, plus
 * TCG-support and migratability masks. */
373 typedef struct FeatureWordInfo {
374 const char **feat_names;
375 uint32_t cpuid_eax; /* Input EAX for CPUID */
376 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
377 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
378 int cpuid_reg; /* output register (R_* constant) */
379 uint32_t tcg_features; /* Feature flags supported by TCG */
380 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
/* One entry per FeatureWord, tying each feature-name table to the CPUID
 * leaf/register it describes. Several entry openers/closers (e.g. the
 * [FEAT_1_EDX] designator and some .cpuid_eax/.cpuid_reg lines) are
 * missing from this extract. */
383 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
385 .feat_names = feature_name,
386 .cpuid_eax = 1, .cpuid_reg = R_EDX,
387 .tcg_features = TCG_FEATURES,
390 .feat_names = ext_feature_name,
391 .cpuid_eax = 1, .cpuid_reg = R_ECX,
392 .tcg_features = TCG_EXT_FEATURES,
394 [FEAT_8000_0001_EDX] = {
395 .feat_names = ext2_feature_name,
396 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
397 .tcg_features = TCG_EXT2_FEATURES,
399 [FEAT_8000_0001_ECX] = {
400 .feat_names = ext3_feature_name,
401 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
402 .tcg_features = TCG_EXT3_FEATURES,
404 [FEAT_C000_0001_EDX] = {
405 .feat_names = ext4_feature_name,
406 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
407 .tcg_features = TCG_EXT4_FEATURES,
410 .feat_names = kvm_feature_name,
411 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
412 .tcg_features = TCG_KVM_FEATURES,
415 .feat_names = svm_feature_name,
416 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
417 .tcg_features = TCG_SVM_FEATURES,
420 .feat_names = cpuid_7_0_ebx_feature_name,
422 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
424 .tcg_features = TCG_7_0_EBX_FEATURES,
427 .feat_names = cpuid_7_0_ecx_feature_name,
429 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
431 .tcg_features = TCG_7_0_ECX_FEATURES,
433 [FEAT_8000_0007_EDX] = {
434 .feat_names = cpuid_apm_edx_feature_name,
435 .cpuid_eax = 0x80000007,
437 .tcg_features = TCG_APM_FEATURES,
438 .unmigratable_flags = CPUID_APM_INVTSC,
441 .feat_names = cpuid_xsave_feature_name,
443 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
445 .tcg_features = TCG_XSAVE_FEATURES,
448 .feat_names = cpuid_6_feature_name,
449 .cpuid_eax = 6, .cpuid_reg = R_EAX,
450 .tcg_features = TCG_6_EAX_FEATURES,
/* Maps an R_* 32-bit register index to its printable name and QAPI enum. */
454 typedef struct X86RegisterInfo32 {
455 /* Name of register */
457 /* QAPI enum value register */
458 X86CPURegister32 qapi_enum;
/* Helper to build one x86_reg_info_32[] entry from a register token. */
461 #define REGISTER(reg) \
462 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
/* NOTE(review): the REGISTER(...) initializer entries of this table are not
 * visible in this extract. */
463 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
/* XSAVE extended state components, indexed by XSTATE component number.
 * .offset/.size locate the component inside the XSAVE area (bytes);
 * .feature/.bits name the CPUID feature bit that gates the component. */
475 const ExtSaveArea x86_ext_save_areas[] = {
476 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
477 .offset = 0x240, .size = 0x100 },
478 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
479 .offset = 0x3c0, .size = 0x40 },
480 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
481 .offset = 0x400, .size = 0x40 },
482 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
483 .offset = 0x440, .size = 0x40 },
484 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
485 .offset = 0x480, .size = 0x200 },
486 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
487 .offset = 0x680, .size = 0x400 },
488 [9] = { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
489 .offset = 0xA80, .size = 0x8 },
/* Return the printable name of 32-bit register index reg.
 * Out-of-range indices take the early-exit branch below; its return value
 * (presumably NULL) is not visible in this extract -- confirm upstream. */
492 const char *get_register_name_32(unsigned int reg)
494 if (reg >= CPU_NB_REGS32) {
497 return x86_reg_info_32[reg].name;
/*
501 * Returns the set of feature flags that are supported and migratable by
502 * QEMU, for a given FeatureWord.
 */
504 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
506 FeatureWordInfo *wi = &feature_word_info[w];
/* Walk all 32 bits of the word, keeping only named, migratable flags. */
510 for (i = 0; i < 32; i++) {
511 uint32_t f = 1U << i;
512 /* If the feature name is unknown, it is not supported by QEMU yet */
513 if (!wi->feat_names[i]) {
516 /* Skip features known to QEMU, but explicitly marked as unmigratable */
517 if (wi->unmigratable_flags & f) {
/* NOTE(review): the accumulation of surviving bits and the return statement
 * are not visible in this extract. */
/* Execute the CPUID instruction on the host with EAX=function, ECX=count
 * and store the resulting EAX/EBX/ECX/EDX through the out parameters.
 * Two inline-asm variants: the x86_64 one uses register outputs directly;
 * the i386 one saves all GPRs with pusha and writes the results through
 * the vec pointer (offsets 0/4/8/12). */
525 void host_cpuid(uint32_t function, uint32_t count,
526 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
532 : "=a"(vec[0]), "=b"(vec[1]),
533 "=c"(vec[2]), "=d"(vec[3])
534 : "0"(function), "c"(count) : "cc");
535 #elif defined(__i386__)
536 asm volatile("pusha \n\t"
538 "mov %%eax, 0(%2) \n\t"
539 "mov %%ebx, 4(%2) \n\t"
540 "mov %%ecx, 8(%2) \n\t"
541 "mov %%edx, 12(%2) \n\t"
543 : : "a"(function), "c"(count), "S"(vec)
/* True for non-NUL characters that are whitespace/control (<= ' ') or
 * outside the printable ASCII range (> '~'); false for the NUL terminator. */
#define iswhite(c) ((c) != 0 && ((c) <= ' ' || (c) > '~'))
561 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
562 * a substring. ex if !NULL points to the first char after a substring,
563 * otherwise the string is assumed to be sized by a terminating nul.
564 * Return lexical ordering of *s1:*s2.
 */
566 static int sstrcmp(const char *s1, const char *e1,
567 const char *s2, const char *e2)
/* Mismatch or either string ending first decides the ordering. */
570 if (!*s1 || !*s2 || *s1 != *s2)
/* Both substrings exhausted simultaneously: equal. */
573 if (s1 == e1 && s2 == e2)
582 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
583 * '|' delimited (possibly empty) strings in which case search for a match
584 * within the alternatives proceeds left to right. Return 0 for success,
585 * non-zero otherwise.
 */
587 static int altcmp(const char *s, const char *e, const char *altstr)
/* q marks the start of the current alternative, p scans to its end. */
591 for (q = p = altstr; ; ) {
592 while (*p && *p != '|')
/* Empty alternative matches an empty key; otherwise compare substrings. */
594 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
603 /* search featureset for flag *[s..e), if found set corresponding bit in
604 * *pval and return true, otherwise return false
 */
606 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
607 const char **featureset)
/* Walk all 32 single-bit masks in parallel with the name table; mask
 * overflows to 0 after the last bit, terminating the loop. */
613 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
614 if (*ppc && !altcmp(s, e, *ppc)) {
/* Set the bit named flagname in whichever feature word defines it.
 * Searches every feature word's name table; if no word knows the name,
 * reports an error through errp. */
622 static void add_flagname_to_bitmaps(const char *flagname,
623 FeatureWordArray words,
627 for (w = 0; w < FEATURE_WORDS; w++) {
628 FeatureWordInfo *wi = &feature_word_info[w];
629 if (wi->feat_names &&
630 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
/* Loop ran to completion without a match: unknown feature name. */
634 if (w == FEATURE_WORDS) {
635 error_setg(errp, "CPU feature %s not found", flagname);
639 /* CPU class name definitions: */
/* QOM type names for CPU models are "<model>-" TYPE_X86_CPU. */
641 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
642 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
644 /* Return type name for a given CPU model name
645 * Caller is responsible for freeing the returned string.
 */
647 static char *x86_cpu_type_name(const char *model_name)
649 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
/* Look up the QOM ObjectClass for a CPU model name by converting it to
 * its type name and querying the class table. The early-exit value for a
 * NULL cpu_model is not visible in this extract -- presumably NULL. */
652 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
657 if (cpu_model == NULL) {
661 typename = x86_cpu_type_name(cpu_model);
662 oc = object_class_by_name(typename);
/* Static description of one builtin CPU model (see builtin_x86_defs):
 * identification strings plus the default feature-word values. Several
 * fields (name, level/xlevel, family/model/stepping, model_id) are not
 * visible in this extract. */
667 struct X86CPUDefinition {
672 /* vendor is zero-terminated, 12 character ASCII string */
673 char vendor[CPUID_VENDOR_SZ + 1];
677 FeatureWordArray features;
681 static X86CPUDefinition builtin_x86_defs[] = {
685 .vendor = CPUID_VENDOR_AMD,
689 .features[FEAT_1_EDX] =
691 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
693 .features[FEAT_1_ECX] =
694 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
695 .features[FEAT_8000_0001_EDX] =
696 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
697 .features[FEAT_8000_0001_ECX] =
698 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
699 .xlevel = 0x8000000A,
704 .vendor = CPUID_VENDOR_AMD,
708 /* Missing: CPUID_HT */
709 .features[FEAT_1_EDX] =
711 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
712 CPUID_PSE36 | CPUID_VME,
713 .features[FEAT_1_ECX] =
714 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
716 .features[FEAT_8000_0001_EDX] =
717 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
718 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
719 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
720 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
722 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
723 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
724 .features[FEAT_8000_0001_ECX] =
725 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
726 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
727 /* Missing: CPUID_SVM_LBRV */
728 .features[FEAT_SVM] =
730 .xlevel = 0x8000001A,
731 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
736 .vendor = CPUID_VENDOR_INTEL,
740 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
741 .features[FEAT_1_EDX] =
743 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
744 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
745 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
746 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
747 .features[FEAT_1_ECX] =
748 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
750 .features[FEAT_8000_0001_EDX] =
751 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
752 .features[FEAT_8000_0001_ECX] =
754 .xlevel = 0x80000008,
755 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
760 .vendor = CPUID_VENDOR_INTEL,
764 /* Missing: CPUID_HT */
765 .features[FEAT_1_EDX] =
766 PPRO_FEATURES | CPUID_VME |
767 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
769 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
770 .features[FEAT_1_ECX] =
771 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
772 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
773 .features[FEAT_8000_0001_EDX] =
774 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
775 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
776 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
777 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
778 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
779 .features[FEAT_8000_0001_ECX] =
781 .xlevel = 0x80000008,
782 .model_id = "Common KVM processor"
787 .vendor = CPUID_VENDOR_INTEL,
791 .features[FEAT_1_EDX] =
793 .features[FEAT_1_ECX] =
795 .xlevel = 0x80000004,
800 .vendor = CPUID_VENDOR_INTEL,
804 .features[FEAT_1_EDX] =
805 PPRO_FEATURES | CPUID_VME |
806 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
807 .features[FEAT_1_ECX] =
809 .features[FEAT_8000_0001_ECX] =
811 .xlevel = 0x80000008,
812 .model_id = "Common 32-bit KVM processor"
817 .vendor = CPUID_VENDOR_INTEL,
821 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
822 .features[FEAT_1_EDX] =
823 PPRO_FEATURES | CPUID_VME |
824 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
826 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
827 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
828 .features[FEAT_1_ECX] =
829 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
830 .features[FEAT_8000_0001_EDX] =
832 .xlevel = 0x80000008,
833 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
838 .vendor = CPUID_VENDOR_INTEL,
842 .features[FEAT_1_EDX] =
849 .vendor = CPUID_VENDOR_INTEL,
853 .features[FEAT_1_EDX] =
860 .vendor = CPUID_VENDOR_INTEL,
864 .features[FEAT_1_EDX] =
871 .vendor = CPUID_VENDOR_INTEL,
875 .features[FEAT_1_EDX] =
882 .vendor = CPUID_VENDOR_AMD,
886 .features[FEAT_1_EDX] =
887 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
889 .features[FEAT_8000_0001_EDX] =
890 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
891 .xlevel = 0x80000008,
896 .vendor = CPUID_VENDOR_INTEL,
900 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
901 .features[FEAT_1_EDX] =
903 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
904 CPUID_ACPI | CPUID_SS,
905 /* Some CPUs got no CPUID_SEP */
906 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
908 .features[FEAT_1_ECX] =
909 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
911 .features[FEAT_8000_0001_EDX] =
913 .features[FEAT_8000_0001_ECX] =
915 .xlevel = 0x80000008,
916 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
921 .vendor = CPUID_VENDOR_INTEL,
925 .features[FEAT_1_EDX] =
926 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
927 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
928 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
929 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
930 CPUID_DE | CPUID_FP87,
931 .features[FEAT_1_ECX] =
932 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
933 .features[FEAT_8000_0001_EDX] =
934 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
935 .features[FEAT_8000_0001_ECX] =
937 .xlevel = 0x80000008,
938 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
943 .vendor = CPUID_VENDOR_INTEL,
947 .features[FEAT_1_EDX] =
948 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
949 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
950 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
951 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
952 CPUID_DE | CPUID_FP87,
953 .features[FEAT_1_ECX] =
954 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
956 .features[FEAT_8000_0001_EDX] =
957 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
958 .features[FEAT_8000_0001_ECX] =
960 .xlevel = 0x80000008,
961 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
966 .vendor = CPUID_VENDOR_INTEL,
970 .features[FEAT_1_EDX] =
971 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
972 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
973 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
974 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
975 CPUID_DE | CPUID_FP87,
976 .features[FEAT_1_ECX] =
977 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
978 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
979 .features[FEAT_8000_0001_EDX] =
980 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
981 .features[FEAT_8000_0001_ECX] =
983 .xlevel = 0x80000008,
984 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
989 .vendor = CPUID_VENDOR_INTEL,
993 .features[FEAT_1_EDX] =
994 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
995 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
996 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
997 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
998 CPUID_DE | CPUID_FP87,
999 .features[FEAT_1_ECX] =
1000 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1001 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1002 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1003 .features[FEAT_8000_0001_EDX] =
1004 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1005 .features[FEAT_8000_0001_ECX] =
1007 .features[FEAT_6_EAX] =
1009 .xlevel = 0x80000008,
1010 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1013 .name = "SandyBridge",
1015 .vendor = CPUID_VENDOR_INTEL,
1019 .features[FEAT_1_EDX] =
1020 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1021 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1022 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1023 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1024 CPUID_DE | CPUID_FP87,
1025 .features[FEAT_1_ECX] =
1026 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1027 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1028 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1029 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1031 .features[FEAT_8000_0001_EDX] =
1032 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1034 .features[FEAT_8000_0001_ECX] =
1036 .features[FEAT_XSAVE] =
1037 CPUID_XSAVE_XSAVEOPT,
1038 .features[FEAT_6_EAX] =
1040 .xlevel = 0x80000008,
1041 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1044 .name = "IvyBridge",
1046 .vendor = CPUID_VENDOR_INTEL,
1050 .features[FEAT_1_EDX] =
1051 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1052 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1053 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1054 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1055 CPUID_DE | CPUID_FP87,
1056 .features[FEAT_1_ECX] =
1057 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1058 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1059 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1060 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1061 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1062 .features[FEAT_7_0_EBX] =
1063 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1065 .features[FEAT_8000_0001_EDX] =
1066 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1068 .features[FEAT_8000_0001_ECX] =
1070 .features[FEAT_XSAVE] =
1071 CPUID_XSAVE_XSAVEOPT,
1072 .features[FEAT_6_EAX] =
1074 .xlevel = 0x80000008,
1075 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1078 .name = "Haswell-noTSX",
1080 .vendor = CPUID_VENDOR_INTEL,
1084 .features[FEAT_1_EDX] =
1085 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1086 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1087 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1088 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1089 CPUID_DE | CPUID_FP87,
1090 .features[FEAT_1_ECX] =
1091 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1092 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1093 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1094 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1095 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1096 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1097 .features[FEAT_8000_0001_EDX] =
1098 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1100 .features[FEAT_8000_0001_ECX] =
1101 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1102 .features[FEAT_7_0_EBX] =
1103 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1104 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1105 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1106 .features[FEAT_XSAVE] =
1107 CPUID_XSAVE_XSAVEOPT,
1108 .features[FEAT_6_EAX] =
1110 .xlevel = 0x80000008,
1111 .model_id = "Intel Core Processor (Haswell, no TSX)",
1115 .vendor = CPUID_VENDOR_INTEL,
1119 .features[FEAT_1_EDX] =
1120 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1121 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1122 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1123 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1124 CPUID_DE | CPUID_FP87,
1125 .features[FEAT_1_ECX] =
1126 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1127 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1128 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1129 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1130 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1131 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1132 .features[FEAT_8000_0001_EDX] =
1133 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1135 .features[FEAT_8000_0001_ECX] =
1136 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1137 .features[FEAT_7_0_EBX] =
1138 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1139 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1140 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1142 .features[FEAT_XSAVE] =
1143 CPUID_XSAVE_XSAVEOPT,
1144 .features[FEAT_6_EAX] =
1146 .xlevel = 0x80000008,
1147 .model_id = "Intel Core Processor (Haswell)",
1150 .name = "Broadwell-noTSX",
1152 .vendor = CPUID_VENDOR_INTEL,
1156 .features[FEAT_1_EDX] =
1157 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1158 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1159 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1160 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1161 CPUID_DE | CPUID_FP87,
1162 .features[FEAT_1_ECX] =
1163 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1164 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1165 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1166 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1167 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1168 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1169 .features[FEAT_8000_0001_EDX] =
1170 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1172 .features[FEAT_8000_0001_ECX] =
1173 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1174 .features[FEAT_7_0_EBX] =
1175 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1176 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1177 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1178 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1180 .features[FEAT_XSAVE] =
1181 CPUID_XSAVE_XSAVEOPT,
1182 .features[FEAT_6_EAX] =
1184 .xlevel = 0x80000008,
1185 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1188 .name = "Broadwell",
1190 .vendor = CPUID_VENDOR_INTEL,
1194 .features[FEAT_1_EDX] =
1195 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1196 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1197 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1198 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1199 CPUID_DE | CPUID_FP87,
1200 .features[FEAT_1_ECX] =
1201 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1202 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1203 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1204 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1205 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1206 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1207 .features[FEAT_8000_0001_EDX] =
1208 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1210 .features[FEAT_8000_0001_ECX] =
1211 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1212 .features[FEAT_7_0_EBX] =
1213 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1214 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1215 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1216 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1218 .features[FEAT_XSAVE] =
1219 CPUID_XSAVE_XSAVEOPT,
1220 .features[FEAT_6_EAX] =
1222 .xlevel = 0x80000008,
1223 .model_id = "Intel Core Processor (Broadwell)",
1226 .name = "Opteron_G1",
1228 .vendor = CPUID_VENDOR_AMD,
1232 .features[FEAT_1_EDX] =
1233 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1234 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1235 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1236 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1237 CPUID_DE | CPUID_FP87,
1238 .features[FEAT_1_ECX] =
1240 .features[FEAT_8000_0001_EDX] =
1241 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1242 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1243 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1244 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1245 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1246 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1247 .xlevel = 0x80000008,
1248 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1251 .name = "Opteron_G2",
1253 .vendor = CPUID_VENDOR_AMD,
1257 .features[FEAT_1_EDX] =
1258 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1259 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1260 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1261 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1262 CPUID_DE | CPUID_FP87,
1263 .features[FEAT_1_ECX] =
1264 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1265 /* Missing: CPUID_EXT2_RDTSCP */
1266 .features[FEAT_8000_0001_EDX] =
1267 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1268 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1269 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1270 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1271 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1272 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1273 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1274 .features[FEAT_8000_0001_ECX] =
1275 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1276 .xlevel = 0x80000008,
1277 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1280 .name = "Opteron_G3",
1282 .vendor = CPUID_VENDOR_AMD,
1286 .features[FEAT_1_EDX] =
1287 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1288 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1289 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1290 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1291 CPUID_DE | CPUID_FP87,
1292 .features[FEAT_1_ECX] =
1293 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1295 /* Missing: CPUID_EXT2_RDTSCP */
1296 .features[FEAT_8000_0001_EDX] =
1297 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1298 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1299 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1300 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1301 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1302 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1303 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1304 .features[FEAT_8000_0001_ECX] =
1305 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1306 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1307 .xlevel = 0x80000008,
1308 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1311 .name = "Opteron_G4",
1313 .vendor = CPUID_VENDOR_AMD,
1317 .features[FEAT_1_EDX] =
1318 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1319 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1320 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1321 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1322 CPUID_DE | CPUID_FP87,
1323 .features[FEAT_1_ECX] =
1324 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1325 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1326 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1328 /* Missing: CPUID_EXT2_RDTSCP */
1329 .features[FEAT_8000_0001_EDX] =
1331 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1332 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1333 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1334 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1335 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1336 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1337 .features[FEAT_8000_0001_ECX] =
1338 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1339 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1340 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1343 .xlevel = 0x8000001A,
1344 .model_id = "AMD Opteron 62xx class CPU",
1347 .name = "Opteron_G5",
1349 .vendor = CPUID_VENDOR_AMD,
1353 .features[FEAT_1_EDX] =
1354 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1355 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1356 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1357 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1358 CPUID_DE | CPUID_FP87,
1359 .features[FEAT_1_ECX] =
1360 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1361 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1362 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1363 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1364 /* Missing: CPUID_EXT2_RDTSCP */
1365 .features[FEAT_8000_0001_EDX] =
1367 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1368 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1369 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1370 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1371 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1372 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1373 .features[FEAT_8000_0001_ECX] =
1374 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1375 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1376 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1379 .xlevel = 0x8000001A,
1380 .model_id = "AMD Opteron 63xx class CPU",
/* Simple (property name, value) pair used for tables of default QOM
 * property settings applied to CPU objects. */
1384 typedef struct PropValue {
1385 const char *prop, *value;
1388 /* KVM-specific features that are automatically added/removed
1389 * from all CPU models when KVM is enabled.
1391 static PropValue kvm_default_props[] = {
1392 { "kvmclock", "on" },
1393 { "kvm-nopiodelay", "on" },
1394 { "kvm-asyncpf", "on" },
1395 { "kvm-steal-time", "on" },
1396 { "kvm-pv-eoi", "on" },
1397 { "kvmclock-stable-bit", "on" },
/* NOTE(review): "monitor" defaults to off under KVM — presumably because
 * MONITOR/MWAIT is not usefully supported by KVM guests; confirm rationale. */
1400 { "monitor", "off" },
1405 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1408 for (pv = kvm_default_props; pv->prop; pv++) {
1409 if (!strcmp(pv->prop, prop)) {
1415 /* It is valid to call this function only for properties that
1416 * are already present in the kvm_default_props table.
/* Forward declaration; definition appears later in this file. */
1421 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1422 bool migratable_only);
/* Fill @str with the host CPU model-id string read from CPUID leaves
 * 0x80000002..0x80000004 (three leaves, 16 bytes each = 48 bytes).
 * Each leaf returns 16 consecutive bytes of the string in EAX..EDX. */
1426 static int cpu_x86_fill_model_id(char *str)
1428 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1431 for (i = 0; i < 3; i++) {
1432 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1433 memcpy(str + i * 16 + 0, &eax, 4);
1434 memcpy(str + i * 16 + 4, &ebx, 4);
1435 memcpy(str + i * 16 + 8, &ecx, 4);
1436 memcpy(str + i * 16 + 12, &edx, 4);
/* CPU model definition for "-cpu host"; filled in from host CPUID data
 * by host_x86_cpu_class_init() below. */
1441 static X86CPUDefinition host_cpudef;
/* qdev properties specific to the "host" CPU model. */
1443 static Property host_x86_cpu_properties[] = {
1444 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1445 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1446 DEFINE_PROP_END_OF_LIST()
1449 /* class_init for the "host" CPU model
1451 * This function may be called before KVM is initialized.
1453 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1455 DeviceClass *dc = DEVICE_CLASS(oc);
1456 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1457 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
/* "-cpu host" can only run under KVM. */
1459 xcc->kvm_required = true;
/* Leaf 0: vendor string is returned in EBX/EDX/ECX order. */
1461 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1462 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
/* Leaf 1 EAX: combine base+extended family and base+extended model fields. */
1464 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1465 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1466 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1467 host_cpudef.stepping = eax & 0x0F;
1469 cpu_x86_fill_model_id(host_cpudef.model_id);
1471 xcc->cpu_def = &host_cpudef;
1473 /* level, xlevel, xlevel2, and the feature words are initialized on
1474 * instance_init, because they require KVM to be initialized.
1477 dc->props = host_x86_cpu_properties;
1478 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1479 dc->cannot_destroy_with_object_finalize_yet = true;
/* instance_init for the "host" CPU model: query KVM for the maximum
 * basic/extended/Centaur CPUID levels and enable the PMU property. */
1482 static void host_x86_cpu_initfn(Object *obj)
1484 X86CPU *cpu = X86_CPU(obj);
1485 CPUX86State *env = &cpu->env;
1486 KVMState *s = kvm_state;
/* class_init set kvm_required; reaching here without KVM is a bug. */
1488 assert(kvm_enabled())
1490 /* We can't fill the features array here because we don't know yet if
1491 * "migratable" is true or false.
1493 cpu->host_features = true;
1495 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1496 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1497 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1499 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
/* QOM type registration info for the "host" CPU model. */
1502 static const TypeInfo host_x86_cpu_type_info = {
1503 .name = X86_CPU_TYPE_NAME("host"),
1504 .parent = TYPE_X86_CPU,
1505 .instance_init = host_x86_cpu_initfn,
1506 .class_init = host_x86_cpu_class_init,
/* Print a warning for every bit set in @mask: features of word @w that
 * were requested but are not available on the current accelerator. */
1511 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1513 FeatureWordInfo *f = &feature_word_info[w];
1516 for (i = 0; i < 32; ++i) {
1517 if ((1UL << i) & mask) {
1518 const char *reg = get_register_name_32(f->cpuid_reg);
/* Unnamed feature bits print only the bit number. */
1520 fprintf(stderr, "warning: %s doesn't support requested feature: "
1521 "CPUID.%02XH:%s%s%s [bit %d]\n",
1522 kvm_enabled() ? "host" : "TCG",
1524 f->feat_names[i] ? "." : "",
1525 f->feat_names[i] ? f->feat_names[i] : "", i);
/* QOM getter for the "family" property: base family (bits 11:8 of
 * cpuid_version) plus the extended family field (bits 27:20). */
1530 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1531 const char *name, void *opaque,
1534 X86CPU *cpu = X86_CPU(obj);
1535 CPUX86State *env = &cpu->env;
1538 value = (env->cpuid_version >> 8) & 0xf;
1540 value += (env->cpuid_version >> 20) & 0xff;
1542 visit_type_int(v, name, &value, errp);
/* QOM setter for the "family" property. Values above 0x0f set the base
 * family to 0xf and store the remainder in the extended-family field,
 * matching the CPUID leaf-1 EAX encoding. Range: 0 .. 0xff + 0xf. */
1545 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1546 const char *name, void *opaque,
1549 X86CPU *cpu = X86_CPU(obj);
1550 CPUX86State *env = &cpu->env;
1551 const int64_t min = 0;
1552 const int64_t max = 0xff + 0xf;
1553 Error *local_err = NULL;
1556 visit_type_int(v, name, &value, &local_err);
1558 error_propagate(errp, local_err);
1561 if (value < min || value > max) {
1562 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1563 name ? name : "null", value, min, max);
/* Clear both base-family (11:8) and extended-family (27:20) fields. */
1567 env->cpuid_version &= ~0xff00f00;
1569 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1571 env->cpuid_version |= value << 8;
/* QOM getter for the "model" property: base model (bits 7:4) in the low
 * nibble, extended model (bits 19:16) in the high nibble. */
1575 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1576 const char *name, void *opaque,
1579 X86CPU *cpu = X86_CPU(obj);
1580 CPUX86State *env = &cpu->env;
1583 value = (env->cpuid_version >> 4) & 0xf;
1584 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1585 visit_type_int(v, name, &value, errp);
/* QOM setter for the "model" property (range 0..0xff): low nibble goes
 * to the base-model field, high nibble to the extended-model field. */
1588 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1589 const char *name, void *opaque,
1592 X86CPU *cpu = X86_CPU(obj);
1593 CPUX86State *env = &cpu->env;
1594 const int64_t min = 0;
1595 const int64_t max = 0xff;
1596 Error *local_err = NULL;
1599 visit_type_int(v, name, &value, &local_err);
1601 error_propagate(errp, local_err);
1604 if (value < min || value > max) {
1605 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1606 name ? name : "null", value, min, max);
/* Clear base-model (7:4) and extended-model (19:16) fields first. */
1610 env->cpuid_version &= ~0xf00f0;
1611 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
/* QOM getter for the "stepping" property (bits 3:0 of cpuid_version). */
1614 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1615 const char *name, void *opaque,
1618 X86CPU *cpu = X86_CPU(obj);
1619 CPUX86State *env = &cpu->env;
1622 value = env->cpuid_version & 0xf;
1623 visit_type_int(v, name, &value, errp);
/* QOM setter for the "stepping" property (range 0..0xf, bits 3:0). */
1626 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1627 const char *name, void *opaque,
1630 X86CPU *cpu = X86_CPU(obj);
1631 CPUX86State *env = &cpu->env;
1632 const int64_t min = 0;
1633 const int64_t max = 0xf;
1634 Error *local_err = NULL;
1637 visit_type_int(v, name, &value, &local_err);
1639 error_propagate(errp, local_err);
1642 if (value < min || value > max) {
1643 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1644 name ? name : "null", value, min, max);
1648 env->cpuid_version &= ~0xf;
1649 env->cpuid_version |= value & 0xf;
/* QOM getter for "vendor": returns a newly allocated 12-character vendor
 * string built from the three cpuid_vendor words. Caller frees. */
1652 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1654 X86CPU *cpu = X86_CPU(obj);
1655 CPUX86State *env = &cpu->env;
1658 value = g_malloc(CPUID_VENDOR_SZ + 1);
1659 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1660 env->cpuid_vendor3);
/* QOM setter for "vendor": @value must be exactly CPUID_VENDOR_SZ (12)
 * characters; it is packed little-endian, 4 bytes per vendor word. */
1664 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1667 X86CPU *cpu = X86_CPU(obj);
1668 CPUX86State *env = &cpu->env;
1671 if (strlen(value) != CPUID_VENDOR_SZ) {
1672 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1676 env->cpuid_vendor1 = 0;
1677 env->cpuid_vendor2 = 0;
1678 env->cpuid_vendor3 = 0;
/* Bytes 0-3 -> vendor1, 4-7 -> vendor2, 8-11 -> vendor3. */
1679 for (i = 0; i < 4; i++) {
1680 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1681 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1682 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
/* QOM getter for "model-id": unpack the 48-byte model string from the
 * cpuid_model word array into a newly allocated string. Caller frees. */
1686 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1688 X86CPU *cpu = X86_CPU(obj);
1689 CPUX86State *env = &cpu->env;
1693 value = g_malloc(48 + 1);
1694 for (i = 0; i < 48; i++) {
/* Byte i lives in word i/4, at byte offset i%4. */
1695 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
/* QOM setter for "model-id": pack @model_id into the 48-byte cpuid_model
 * word array, zero-padding any unused tail bytes. */
1701 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1704 X86CPU *cpu = X86_CPU(obj);
1705 CPUX86State *env = &cpu->env;
1708 if (model_id == NULL) {
1711 len = strlen(model_id);
1712 memset(env->cpuid_model, 0, 48);
1713 for (i = 0; i < 48; i++) {
1717 c = (uint8_t)model_id[i];
1719 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
/* QOM getter for "tsc-frequency": reported in Hz (tsc_khz * 1000). */
1723 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1724 void *opaque, Error **errp)
1726 X86CPU *cpu = X86_CPU(obj);
1729 value = cpu->env.tsc_khz * 1000;
1730 visit_type_int(v, name, &value, errp);
/* QOM setter for "tsc-frequency": input in Hz, stored in kHz. Also
 * records user_tsc_khz so the user-requested value is remembered. */
1733 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1734 void *opaque, Error **errp)
1736 X86CPU *cpu = X86_CPU(obj);
1737 const int64_t min = 0;
1738 const int64_t max = INT64_MAX;
1739 Error *local_err = NULL;
1742 visit_type_int(v, name, &value, &local_err);
1744 error_propagate(errp, local_err);
1747 if (value < min || value > max) {
1748 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1749 name ? name : "null", value, min, max);
1753 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
/* QOM getter for "apic-id". */
1756 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1757 void *opaque, Error **errp)
1759 X86CPU *cpu = X86_CPU(obj);
1760 int64_t value = cpu->apic_id;
1762 visit_type_int(v, name, &value, errp);
/* QOM setter for "apic-id". Rejects: changes after the device is
 * realized, values outside 0..UINT32_MAX, and IDs already in use by
 * another CPU. */
1765 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1766 void *opaque, Error **errp)
1768 X86CPU *cpu = X86_CPU(obj);
1769 DeviceState *dev = DEVICE(obj);
1770 const int64_t min = 0;
1771 const int64_t max = UINT32_MAX;
1772 Error *error = NULL;
1775 if (dev->realized) {
1776 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1777 "it was realized", name, object_get_typename(obj));
1781 visit_type_int(v, name, &value, &error);
1783 error_propagate(errp, error);
1786 if (value < min || value > max) {
1787 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1788 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1789 object_get_typename(obj), name, value, min, max);
/* Setting the same ID again is allowed; only collisions are errors. */
1793 if ((value != cpu->apic_id) && cpu_exists(value)) {
1794 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1797 cpu->apic_id = value;
1800 /* Generic getter for "feature-words" and "filtered-features" properties */
1801 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1802 const char *name, void *opaque,
1805 uint32_t *array = (uint32_t *)opaque;
/* Stack-allocated list nodes: the visitor consumes the list before we
 * return, so no heap allocation is needed. */
1808 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1809 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1810 X86CPUFeatureWordInfoList *list = NULL;
1812 for (w = 0; w < FEATURE_WORDS; w++) {
1813 FeatureWordInfo *wi = &feature_word_info[w];
1814 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1815 qwi->cpuid_input_eax = wi->cpuid_eax;
1816 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1817 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1818 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1819 qwi->features = array[w];
1821 /* List will be in reverse order, but order shouldn't matter */
1822 list_entries[w].next = list;
1823 list_entries[w].value = &word_infos[w];
1824 list = &list_entries[w];
1827 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
1828 error_propagate(errp, err);
/* QOM getter for the Hyper-V "hv-spinlocks" retry count. */
1831 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1832 void *opaque, Error **errp)
1834 X86CPU *cpu = X86_CPU(obj);
1835 int64_t value = cpu->hyperv_spinlock_attempts;
1837 visit_type_int(v, name, &value, errp);
/* QOM setter for "hv-spinlocks". The minimum of 0xFFF matches the
 * Hyper-V spec's smallest valid spinlock retry count. */
1840 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1841 void *opaque, Error **errp)
1843 const int64_t min = 0xFFF;
1844 const int64_t max = UINT_MAX;
1845 X86CPU *cpu = X86_CPU(obj);
1849 visit_type_int(v, name, &value, &err);
1851 error_propagate(errp, err);
1855 if (value < min || value > max) {
1856 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1857 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1858 object_get_typename(obj), name ? name : "null",
1862 cpu->hyperv_spinlock_attempts = value;
/* PropertyInfo wiring the hv-spinlocks getter/setter into qdev. */
1865 static PropertyInfo qdev_prop_spinlocks = {
1867 .get = x86_get_hv_spinlocks,
1868 .set = x86_set_hv_spinlocks,
1871 /* Convert all '_' in a feature string option name to '-', to make feature
1872 * name conform to QOM property naming rule, which uses '-' instead of '_'.
1874 static inline void feat2prop(char *s)
1876 while ((s = strchr(s, '_'))) {
1881 /* Parse "+feature,-feature,feature=foo" CPU feature string
1883 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1886 X86CPU *cpu = X86_CPU(cs);
1887 char *featurestr; /* Single 'key=value" string being parsed */
1889 /* Features to be added */
1890 FeatureWordArray plus_features = { 0 };
1891 /* Features to be removed */
1892 FeatureWordArray minus_features = { 0 };
1894 CPUX86State *env = &cpu->env;
1895 Error *local_err = NULL;
/* NOTE(review): strtok() mutates @features and uses static state; this
 * matches the existing single-threaded parse path in this file. */
1897 featurestr = features ? strtok(features, ",") : NULL;
1899 while (featurestr) {
/* "+feat"/"-feat" are legacy add/remove forms; everything else is
 * treated as a QOM property ("key=value" or bare "key" meaning on). */
1901 if (featurestr[0] == '+') {
1902 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1903 } else if (featurestr[0] == '-') {
1904 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1905 } else if ((val = strchr(featurestr, '='))) {
1907 feat2prop(featurestr);
1908 if (!strcmp(featurestr, "xlevel")) {
1912 numvalue = strtoul(val, &err, 0);
1913 if (!*val || *err) {
1914 error_setg(errp, "bad numerical value %s", val);
/* Legacy fixup: xlevel was historically given as an offset. */
1917 if (numvalue < 0x80000000) {
1918 error_report("xlevel value shall always be >= 0x80000000"
1919 ", fixup will be removed in future versions");
1920 numvalue += 0x80000000;
1922 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1923 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1924 } else if (!strcmp(featurestr, "tsc-freq")) {
/* Accept size suffixes (K/M/G) with a base-1000 multiplier. */
1929 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1930 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1931 if (tsc_freq < 0 || *err) {
1932 error_setg(errp, "bad numerical value %s", val);
1935 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1936 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1938 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1940 const int min = 0xFFF;
1942 numvalue = strtoul(val, &err, 0)
1943 if (!*val || *err) {
1944 error_setg(errp, "bad numerical value %s", val);
/* Legacy fixup: clamp too-small spinlock counts to the minimum. */
1947 if (numvalue < min) {
1948 error_report("hv-spinlocks value shall always be >= 0x%x"
1949 ", fixup will be removed in future versions",
1953 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1954 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1956 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1959 feat2prop(featurestr);
1960 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1963 error_propagate(errp, local_err);
1966 featurestr = strtok(NULL, ",");
/* host_features: start from everything the accelerator supports. */
1969 if (cpu->host_features) {
1970 for (w = 0; w < FEATURE_WORDS; w++) {
1972 x86_cpu_get_supported_feature_word(w, cpu->migratable);
/* Apply legacy +/- adjustments on top of the base feature set. */
1976 for (w = 0; w < FEATURE_WORDS; w++) {
1977 env->features[w] |= plus_features[w];
1978 env->features[w] &= ~minus_features[w];
1982 /* Print all cpuid feature names in featureset
1984 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1989 for (bit = 0; bit < 32; bit++) {
1990 if (featureset[bit]) {
1991 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1997 /* generate CPU information. */
1998 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2000 X86CPUDefinition *def;
/* First list all built-in CPU model definitions... */
2004 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2005 def = &builtin_x86_defs[i];
2006 snprintf(buf, sizeof(buf), "%s", def->name);
2007 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
/* ...then the special "host" model (KVM only)... */
2010 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2011 "KVM processor with all supported host features "
2012 "(only available in KVM mode)");
/* ...then every recognized CPUID feature flag name, per feature word. */
2015 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2016 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2017 FeatureWordInfo *fw = &feature_word_info[i];
2019 (*cpu_fprintf)(f, " ");
2020 listflags(f, cpu_fprintf, fw->feat_names);
2021 (*cpu_fprintf)(f, "\n");
/* QMP query-cpu-definitions: build a list of all built-in CPU model
 * names. The list is prepended to, so it comes out in reverse order. */
2025 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2027 CpuDefinitionInfoList *cpu_list = NULL;
2028 X86CPUDefinition *def;
2031 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2032 CpuDefinitionInfoList *entry;
2033 CpuDefinitionInfo *info;
2035 def = &builtin_x86_defs[i];
2036 info = g_malloc0(sizeof(*info));
2037 info->name = g_strdup(def->name);
2039 entry = g_malloc0(sizeof(*entry));
2040 entry->value = info;
2041 entry->next = cpu_list;
/* Return the feature bits of word @w that the current accelerator can
 * actually provide; with @migratable_only, restrict further to bits
 * that are safe for live migration. */
2048 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2049 bool migratable_only)
2051 FeatureWordInfo *wi = &feature_word_info[w];
2054 if (kvm_enabled()) {
2055 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2058 } else if (tcg_enabled()) {
2059 r = wi->tcg_features;
2063 if (migratable_only) {
2064 r &= x86_cpu_get_migratable_flags(w);
2070 * Filters CPU feature words based on host availability of each feature.
2072 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2074 static int x86_cpu_filter_features(X86CPU *cpu)
2076 CPUX86State *env = &cpu->env;
2080 for (w = 0; w < FEATURE_WORDS; w++) {
2081 uint32_t host_feat =
2082 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2083 uint32_t requested_features = env->features[w];
/* Drop unsupported bits; remember them in filtered_features so they
 * can be reported (and so "enforce" mode can fail). */
2084 env->features[w] &= host_feat;
2085 cpu->filtered_features[w] = requested_features & ~env->features[w];
2086 if (cpu->filtered_features[w]) {
2087 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2088 report_unavailable_features(w, cpu->filtered_features[w]);
/* Apply a NULL-terminated table of (property, value) defaults to @cpu. */
2097 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2100 for (pv = props; pv->prop; pv++) {
2104 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2109 /* Load data from X86CPUDefinition
2111 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2113 CPUX86State *env = &cpu->env;
2115 char host_vendor[CPUID_VENDOR_SZ + 1];
/* Copy the scalar model fields through the QOM properties so that the
 * property setters' validation/encoding logic is applied. */
2118 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2119 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2120 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2121 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2122 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2123 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2124 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2125 for (w = 0; w < FEATURE_WORDS; w++) {
2126 env->features[w] = def->features[w];
2129 /* Special cases not set in the X86CPUDefinition structs: */
2130 if (kvm_enabled()) {
2131 x86_cpu_apply_props(cpu, kvm_default_props);
/* CPUID[1].ECX bit 31: tell the guest it runs under a hypervisor. */
2134 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2136 /* sysenter isn't supported in compatibility mode on AMD,
2137 * syscall isn't supported in compatibility mode on Intel.
2138 * Normally we advertise the actual CPU vendor, but you can
2139 * override this using the 'vendor' property if you want to use
2140 * KVM's sysenter/syscall emulation in compatibility mode and
2141 * when doing cross vendor migration
2143 vendor = def->vendor;
2144 if (kvm_enabled()) {
2145 uint32_t ebx = 0, ecx = 0, edx = 0;
2146 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2147 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2148 vendor = host_vendor;
2151 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
/* Create (but do not realize) an X86CPU from a "-cpu model,feat,..."
 * string. Returns NULL and sets @errp on failure. */
2155 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2160 gchar **model_pieces;
2161 char *name, *features;
2162 Error *error = NULL;
/* Split into at most two pieces: model name and the feature string. */
2164 model_pieces = g_strsplit(cpu_model, ",", 2);
2165 if (!model_pieces[0]) {
2166 error_setg(&error, "Invalid/empty CPU model name");
2169 name = model_pieces[0];
2170 features = model_pieces[1];
2172 oc = x86_cpu_class_by_name(name);
2174 error_setg(&error, "Unable to find CPU definition: %s", name);
2177 xcc = X86_CPU_CLASS(oc);
/* e.g. the "host" model sets kvm_required in its class_init. */
2179 if (xcc->kvm_required && !kvm_enabled()) {
2180 error_setg(&error, "CPU model '%s' requires KVM", name);
2184 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2186 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2192 if (error != NULL) {
2193 error_propagate(errp, error);
2195 object_unref(OBJECT(cpu));
2199 g_strfreev(model_pieces);
/* Convenience wrapper: create and realize a CPU; on failure report the
 * error and clean up, returning NULL. */
2203 X86CPU *cpu_x86_init(const char *cpu_model)
2205 Error *error = NULL;
2208 cpu = cpu_x86_create(cpu_model, &error);
2213 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2217 error_report_err(error);
2219 object_unref(OBJECT(cpu));
/* class_init for CPU model types generated from X86CPUDefinition tables:
 * simply stashes the definition in the class for later use. */
2226 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2228 X86CPUDefinition *cpudef = data;
2229 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2231 xcc->cpu_def = cpudef;
/* Register one built-in CPU model definition as a QOM type. */
2234 static void x86_register_cpudef_type(X86CPUDefinition *def)
2236 char *typename = x86_cpu_type_name(def->name);
2239 .parent = TYPE_X86_CPU,
2240 .class_init = x86_cpu_cpudef_class_init,
2248 #if !defined(CONFIG_USER_ONLY)
/* Hide the APIC CPUID feature bit (used when no APIC is present). */
2250 void cpu_clear_apic_feature(CPUX86State *env)
2252 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2255 #endif /* !CONFIG_USER_ONLY */
2257 /* Initialize list of CPU models, filling some non-static fields if necessary
2259 void x86_cpudef_setup(void)
2262 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2264 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2265 X86CPUDefinition *def = &builtin_x86_defs[i];
2267 /* Look for specific "cpudef" models that */
2268 /* have the QEMU version in .model_id */
2269 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2270 if (strcmp(model_with_versions[j], def->name) == 0) {
2271 pstrcpy(def->model_id, sizeof(def->model_id),
2272 "QEMU Virtual CPU version ");
2273 pstrcat(def->model_id, sizeof(def->model_id),
2281 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2282 uint32_t *eax, uint32_t *ebx,
2283 uint32_t *ecx, uint32_t *edx)
2285 X86CPU *cpu = x86_env_get_cpu(env);
2286 CPUState *cs = CPU(cpu);
2288 /* test if maximum index reached */
2289 if (index & 0x80000000) {
2290 if (index > env->cpuid_xlevel) {
2291 if (env->cpuid_xlevel2 > 0) {
2292 /* Handle the Centaur's CPUID instruction. */
2293 if (index > env->cpuid_xlevel2) {
2294 index = env->cpuid_xlevel2;
2295 } else if (index < 0xC0000000) {
2296 index = env->cpuid_xlevel;
2299 /* Intel documentation states that invalid EAX input will
2300 * return the same information as EAX=cpuid_level
2301 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2303 index = env->cpuid_level;
2307 if (index > env->cpuid_level)
2308 index = env->cpuid_level;
2313 *eax = env->cpuid_level;
2314 *ebx = env->cpuid_vendor1;
2315 *edx = env->cpuid_vendor2;
2316 *ecx = env->cpuid_vendor3;
2319 *eax = env->cpuid_version;
2320 *ebx = (cpu->apic_id << 24) |
2321 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2322 *ecx = env->features[FEAT_1_ECX];
2323 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2324 *ecx |= CPUID_EXT_OSXSAVE;
2326 *edx = env->features[FEAT_1_EDX];
2327 if (cs->nr_cores * cs->nr_threads > 1) {
2328 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2333 /* cache info: needed for Pentium Pro compatibility */
2334 if (cpu->cache_info_passthrough) {
2335 host_cpuid(index, 0, eax, ebx, ecx, edx);
2338 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2341 *edx = (L1D_DESCRIPTOR << 16) | \
2342 (L1I_DESCRIPTOR << 8) | \
2346 /* cache info: needed for Core compatibility */
2347 if (cpu->cache_info_passthrough) {
2348 host_cpuid(index, count, eax, ebx, ecx, edx);
2349 *eax &= ~0xFC000000;
2353 case 0: /* L1 dcache info */
2354 *eax |= CPUID_4_TYPE_DCACHE | \
2355 CPUID_4_LEVEL(1) | \
2356 CPUID_4_SELF_INIT_LEVEL;
2357 *ebx = (L1D_LINE_SIZE - 1) | \
2358 ((L1D_PARTITIONS - 1) << 12) | \
2359 ((L1D_ASSOCIATIVITY - 1) << 22);
2360 *ecx = L1D_SETS - 1;
2361 *edx = CPUID_4_NO_INVD_SHARING;
2363 case 1: /* L1 icache info */
2364 *eax |= CPUID_4_TYPE_ICACHE | \
2365 CPUID_4_LEVEL(1) | \
2366 CPUID_4_SELF_INIT_LEVEL;
2367 *ebx = (L1I_LINE_SIZE - 1) | \
2368 ((L1I_PARTITIONS - 1) << 12) | \
2369 ((L1I_ASSOCIATIVITY - 1) << 22);
2370 *ecx = L1I_SETS - 1;
2371 *edx = CPUID_4_NO_INVD_SHARING;
2373 case 2: /* L2 cache info */
2374 *eax |= CPUID_4_TYPE_UNIFIED | \
2375 CPUID_4_LEVEL(2) | \
2376 CPUID_4_SELF_INIT_LEVEL;
2377 if (cs->nr_threads > 1) {
2378 *eax |= (cs->nr_threads - 1) << 14;
2380 *ebx = (L2_LINE_SIZE - 1) | \
2381 ((L2_PARTITIONS - 1) << 12) | \
2382 ((L2_ASSOCIATIVITY - 1) << 22);
2384 *edx = CPUID_4_NO_INVD_SHARING;
2386 default: /* end of info */
2395 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2396 if ((*eax & 31) && cs->nr_cores > 1) {
2397 *eax |= (cs->nr_cores - 1) << 26;
2401 /* mwait info: needed for Core compatibility */
2402 *eax = 0; /* Smallest monitor-line size in bytes */
2403 *ebx = 0; /* Largest monitor-line size in bytes */
2404 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2408 /* Thermal and Power Leaf */
2409 *eax = env->features[FEAT_6_EAX];
2415 /* Structured Extended Feature Flags Enumeration Leaf */
2417 *eax = 0; /* Maximum ECX value for sub-leaves */
2418 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2419 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2420 *edx = 0; /* Reserved */
2429 /* Direct Cache Access Information Leaf */
2430 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2436 /* Architectural Performance Monitoring Leaf */
2437 if (kvm_enabled() && cpu->enable_pmu) {
2438 KVMState *s = cs->kvm_state;
2440 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2441 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2442 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2443 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2452 KVMState *s = cs->kvm_state;
2456 /* Processor Extended State */
2461 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2464 if (kvm_enabled()) {
2465 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2467 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2474 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2475 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2476 if ((env->features[esa->feature] & esa->bits) == esa->bits
2477 && ((ena_mask >> i) & 1) != 0) {
2481 *edx |= 1u << (i - 32);
2483 *ecx = MAX(*ecx, esa->offset + esa->size);
2486 *eax |= ena_mask & (XSTATE_FP | XSTATE_SSE);
2488 } else if (count == 1) {
2489 *eax = env->features[FEAT_XSAVE];
2490 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2491 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2492 if ((env->features[esa->feature] & esa->bits) == esa->bits
2493 && ((ena_mask >> count) & 1) != 0) {
2501 *eax = env->cpuid_xlevel;
2502 *ebx = env->cpuid_vendor1;
2503 *edx = env->cpuid_vendor2;
2504 *ecx = env->cpuid_vendor3;
2507 *eax = env->cpuid_version;
2509 *ecx = env->features[FEAT_8000_0001_ECX];
2510 *edx = env->features[FEAT_8000_0001_EDX];
2512 /* The Linux kernel checks for the CMPLegacy bit and
2513 * discards multiple thread information if it is set.
2514 * So dont set it here for Intel to make Linux guests happy.
2516 if (cs->nr_cores * cs->nr_threads > 1) {
2517 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2518 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2519 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2520 *ecx |= 1 << 1; /* CmpLegacy bit */
2527 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2528 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2529 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2530 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2533 /* cache info (L1 cache) */
2534 if (cpu->cache_info_passthrough) {
2535 host_cpuid(index, 0, eax, ebx, ecx, edx);
2538 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2539 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2540 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2541 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2542 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2543 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2544 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2545 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2548 /* cache info (L2 cache) */
2549 if (cpu->cache_info_passthrough) {
2550 host_cpuid(index, 0, eax, ebx, ecx, edx);
2553 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2554 (L2_DTLB_2M_ENTRIES << 16) | \
2555 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2556 (L2_ITLB_2M_ENTRIES);
2557 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2558 (L2_DTLB_4K_ENTRIES << 16) | \
2559 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2560 (L2_ITLB_4K_ENTRIES);
2561 *ecx = (L2_SIZE_KB_AMD << 16) | \
2562 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2563 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2564 *edx = ((L3_SIZE_KB/512) << 18) | \
2565 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2566 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2572 *edx = env->features[FEAT_8000_0007_EDX];
2575 /* virtual & phys address size in low 2 bytes. */
2576 /* XXX: This value must match the one used in the MMU code. */
2577 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2578 /* 64 bit processor */
2579 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2580 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2582 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2583 *eax = 0x00000024; /* 36 bits physical */
2585 *eax = 0x00000020; /* 32 bits physical */
2591 if (cs->nr_cores * cs->nr_threads > 1) {
2592 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2596 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2597 *eax = 0x00000001; /* SVM Revision */
2598 *ebx = 0x00000010; /* nr of ASIDs */
2600 *edx = env->features[FEAT_SVM]; /* optional features */
2609 *eax = env->cpuid_xlevel2;
2615 /* Support for VIA CPU's CPUID instruction */
2616 *eax = env->cpuid_version;
2619 *edx = env->features[FEAT_C000_0001_EDX];
2624 /* Reserved for the future, and now filled with zero */
2631 /* reserved values: zero */
2640 /* CPUClass::reset() */
2641 static void x86_cpu_reset(CPUState *s)
2643 X86CPU *cpu = X86_CPU(s);
2644 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2645 CPUX86State *env = &cpu->env;
2648 xcc->parent_reset(s);
/* Zero everything up to (not including) cpuid_level: the CPUID
 * configuration fields placed after it survive reset. */
2650 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2654 env->old_exception = -1;
2656 /* init to reset state */
2658 #ifdef CONFIG_SOFTMMU
2659 env->hflags |= HF_SOFTMMU_MASK;
2661 env->hflags2 |= HF2_GIF_MASK;
/* Power-on CR0 value: CD | NW | ET (0x60000010). */
2663 cpu_x86_update_cr0(env, 0x60000010);
2664 env->a20_mask = ~0x0;
2665 env->smbase = 0x30000;
2667 env->idt.limit = 0xffff;
2668 env->gdt.limit = 0xffff;
2669 env->ldt.limit = 0xffff;
2670 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2671 env->tr.limit = 0xffff;
2672 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
/* Real-mode segment state; CS base 0xffff0000 puts execution at the
 * architectural reset vector. */
2674 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2675 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2676 DESC_R_MASK | DESC_A_MASK);
2677 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2678 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2680 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2681 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2683 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2684 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2686 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2687 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2689 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2690 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
/* EDX carries the CPU signature after reset. */
2694 env->regs[R_EDX] = env->cpuid_version;
2699 for (i = 0; i < 8; i++) {
/* x87 control word and SSE MXCSR power-on defaults. */
2702 cpu_set_fpuc(env, 0x37f);
2704 env->mxcsr = 0x1f80;
2705 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2707 env->pat = 0x0007040600070406ULL;
2708 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2710 memset(env->dr, 0, sizeof(env->dr));
2711 env->dr[6] = DR6_FIXED_1;
2712 env->dr[7] = DR7_FIXED_1;
2713 cpu_breakpoint_remove_all(s, BP_CPU);
2714 cpu_watchpoint_remove_all(s, BP_CPU);
/*
2719 * SDM 11.11.5 requires:
2720 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2721 * - IA32_MTRR_PHYSMASKn.V = 0
2722 * All other bits are undefined. For simplification, zero it all.
 */
2724 env->mtrr_deftype = 0;
2725 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2726 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2728 #if !defined(CONFIG_USER_ONLY)
2729 /* We hard-wire the BSP to the first CPU. */
2730 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
/* Application processors start halted, waiting for INIT/SIPI. */
2732 s->halted = !cpu_is_bsp(cpu);
2734 if (kvm_enabled()) {
2735 kvm_arch_reset_vcpu(cpu);
2740 #ifndef CONFIG_USER_ONLY
2741 bool cpu_is_bsp(X86CPU *cpu)
2743 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2746 /* TODO: remove me, when reset over QOM tree is implemented */
2747 static void x86_cpu_machine_reset_cb(void *opaque)
2749 X86CPU *cpu = opaque;
2750 cpu_reset(CPU(cpu));
/* Initialize Machine Check Architecture state (MCG_CAP, MCG_CTL and the
 * per-bank control values) when the configured CPU advertises both MCE
 * and MCA and its family field (CPUID version bits 11:8) is >= 6. */
2754 static void mce_init(X86CPU *cpu)
2756 CPUX86State *cenv = &cpu->env;
2759 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2760 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2761 (CPUID_MCE | CPUID_MCA)) {
2762 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2763 cenv->mcg_ctl = ~(uint64_t)0;
/* Enable every control bit of each bank's MCi_CTL (stride 4: CTL,
 * STATUS, ADDR, MISC per bank). */
2764 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2765 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2770 #ifndef CONFIG_USER_ONLY
/* Create (but do not realize) the per-CPU APIC device, choosing the
 * implementation matching the accelerator: in-kernel KVM APIC, Xen
 * APIC, or the fully emulated "apic". */
2771 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2773 APICCommonState *apic;
2774 const char *apic_type = "apic";
2776 if (kvm_apic_in_kernel()) {
2777 apic_type = "kvm-apic";
2778 } else if (xen_enabled()) {
2779 apic_type = "xen-apic";
2782 cpu->apic_state = DEVICE(object_new(apic_type));
/* Parent the APIC under the CPU object so it is freed with the CPU. */
2784 object_property_add_child(OBJECT(cpu), "apic",
2785 OBJECT(cpu->apic_state), NULL);
2786 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2787 /* TODO: convert to link<> */
2788 apic = APIC_COMMON(cpu->apic_state)
2790 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
/* Realize the APIC created by x86_cpu_apic_create() and, exactly once
 * system-wide, map the shared APIC MMIO window. */
2793 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2795 APICCommonState *apic;
/* The MMIO area is shared by all CPUs; map it only the first time. */
2796 static bool apic_mmio_map_once;
2798 if (cpu->apic_state == NULL) {
2801 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2804 /* Map APIC MMIO area */
2805 apic = APIC_COMMON(cpu->apic_state);
2806 if (!apic_mmio_map_once) {
2807 memory_region_add_subregion_overlap(get_system_memory(),
2809 MSR_IA32_APICBASE_BASE,
2812 apic_mmio_map_once = true;
/* Machine-init-done notifier: alias the machine's /machine/smram region
 * into this CPU's address-space root with higher priority, disabled by
 * default (toggled when entering/leaving SMM). */
2816 static void x86_cpu_machine_done(Notifier *n, void *unused)
2818 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2819 MemoryRegion *smram =
2820 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
/* NOTE(review): presumably guarded by a NULL check on smram when the
 * machine exposes no SMRAM — confirm against upstream. */
2823 cpu->smram = g_new(MemoryRegion, 1);
2824 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2825 smram, 0, 1ull << 32);
2826 memory_region_set_enabled(cpu->smram, false);
2827 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2831 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
/* Vendor checks against the three CPUID vendor-string registers
 * (EBX/EDX/ECX of CPUID[0]). */
2837 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2838 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2839 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2840 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2841 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2842 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for X86CPU: validate the APIC ID, adjust CPUID
 * levels and AMD alias bits, filter features against the accelerator,
 * set up reset/APIC/per-CPU address-space plumbing, then chain to the
 * parent realize and propagate any error. */
2843 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2845 CPUState *cs = CPU(dev);
2846 X86CPU *cpu = X86_CPU(dev);
2847 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2848 CPUX86State *env = &cpu->env;
2849 Error *local_err = NULL;
2850 static bool ht_warned;
2852 if (cpu->apic_id < 0) {
2853 error_setg(errp, "apic-id property was not initialized properly");
/* CPUID[7] features require a CPUID level of at least 7. */
2857 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2858 env->cpuid_level = 7;
2861 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
 * CPUID[1].EDX; mirror the aliased FEAT_1_EDX bits here. */
2864 if (IS_AMD_CPU(env)) {
2865 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2866 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2867 & CPUID_EXT2_AMD_ALIASES);
/* Drop features the accelerator can't provide; with "enforce" set,
 * any dropped feature is a hard configuration error. */
2871 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2872 error_setg(&local_err,
2874 "Host doesn't support requested features" :
2875 "TCG doesn't support requested features");
2879 #ifndef CONFIG_USER_ONLY
2880 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2882 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2883 x86_cpu_apic_create(cpu, &local_err);
2884 if (local_err != NULL) {
/* TCG needs a per-CPU address space so SMRAM can be mapped per CPU. */
2892 #ifndef CONFIG_USER_ONLY
2893 if (tcg_enabled()) {
2894 AddressSpace *newas = g_new(AddressSpace, 1);
2896 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2897 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2899 /* Outer container... */
2900 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2901 memory_region_set_enabled(cpu->cpu_as_root, true);
2903 /* ... with two regions inside: normal system memory with low
 * priority, and SMRAM with higher priority (added later). */
2906 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2907 get_system_memory(), 0, ~0ull);
2908 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2909 memory_region_set_enabled(cpu->cpu_as_mem, true);
2910 address_space_init(newas, cpu->cpu_as_root, "CPU");
2912 cpu_address_space_init(cs, newas, 0);
2914 /* ... SMRAM with higher priority, linked from /machine/smram. */
2915 cpu->machine_done.notify = x86_cpu_machine_done;
2916 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2922 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2923 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2924 * based on inputs (sockets,cores,threads), it is still better to give
2927 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2928 * cs->nr_threads hasn't been populated yet and the checking is incorrect. */
2930 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2931 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2932 " -smp options properly.");
2936 x86_cpu_apic_realize(cpu, &local_err);
2937 if (local_err != NULL) {
2942 xcc->parent_realize(dev, &local_err);
2945 if (local_err != NULL) {
2946 error_propagate(errp, local_err);
2951 typedef struct BitProperty {
2956 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
2957 void *opaque, Error **errp)
2959 BitProperty *fp = opaque;
2960 bool value = (*fp->ptr & fp->mask) == fp->mask;
2961 visit_type_bool(v, name, &value, errp);
/* QOM setter for a feature-bit property: sets or clears every bit in
 * the property's mask.  Changing a feature bit after the device is
 * realized is rejected. */
2964 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
2965 void *opaque, Error **errp)
2967 DeviceState *dev = DEVICE(obj);
2968 BitProperty *fp = opaque;
2969 Error *local_err = NULL;
2972 if (dev->realized) {
2973 qdev_prop_set_after_realize(dev, name, errp);
2977 visit_type_bool(v, name, &value, &local_err);
2979 error_propagate(errp, local_err);
/* true -> set all masked bits, false -> clear them */
2984 *fp->ptr |= fp->mask;
2986 *fp->ptr &= ~fp->mask;
/* QOM property release hook: frees the BitProperty descriptor allocated
 * by x86_cpu_register_bit_prop(). */
2990 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
2993 BitProperty *prop = opaque;
2997 /* Register a boolean property to get/set a single bit in a uint32_t field.
2999 * The same property name can be registered multiple times to make it affect
3000 * multiple bits in the same FeatureWord. In that case, the getter will return
3001 * true only if all bits are set.
 */
3003 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3004 const char *prop_name,
3010 uint32_t mask = (1UL << bitnr);
/* Reuse the existing BitProperty if this name was registered before;
 * it must target the same feature-word field. */
3012 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3015 assert(fp->ptr == field);
/* First registration for this name: allocate a fresh descriptor. */
3018 fp = g_new0(BitProperty, 1);
3021 object_property_add(OBJECT(cpu), prop_name, "bool",
3022 x86_cpu_get_bit_prop,
3023 x86_cpu_set_bit_prop,
3024 x86_cpu_release_bit_prop, fp, &error_abort);
/* Register the QOM bit property for one bit of one feature word, using
 * the name(s) from feature_word_info; "|"-separated alternative names
 * become aliases of the first name. */
3028 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3032 Object *obj = OBJECT(cpu);
3035 FeatureWordInfo *fi = &feature_word_info[w];
/* Skip words/bits that have no registered name. */
3037 if (!fi->feat_names) {
3040 if (!fi->feat_names[bitnr]) {
3044 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
/* Canonicalize the first name and register the real property. */
3046 feat2prop(names[0]);
3047 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
/* Remaining names become aliases of the first. */
3049 for (i = 1; names[i]; i++) {
3050 feat2prop(names[i]);
3051 object_property_add_alias(obj, names[i], obj, names[0],
/* QOM instance_init: register CPUID-related properties and all
 * feature-bit properties, set defaults, and load the class's CPU model
 * definition into the new instance. */
3058 static void x86_cpu_initfn(Object *obj)
3060 CPUState *cs = CPU(obj);
3061 X86CPU *cpu = X86_CPU(obj);
3062 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3063 CPUX86State *env = &cpu->env;
3068 cpu_exec_init(cs, &error_abort);
3070 object_property_add(obj, "family", "int",
3071 x86_cpuid_version_get_family,
3072 x86_cpuid_version_set_family, NULL, NULL, NULL);
3073 object_property_add(obj, "model", "int",
3074 x86_cpuid_version_get_model,
3075 x86_cpuid_version_set_model, NULL, NULL, NULL);
3076 object_property_add(obj, "stepping", "int",
3077 x86_cpuid_version_get_stepping,
3078 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3079 object_property_add_str(obj, "vendor",
3080 x86_cpuid_get_vendor,
3081 x86_cpuid_set_vendor, NULL);
3082 object_property_add_str(obj, "model-id",
3083 x86_cpuid_get_model_id,
3084 x86_cpuid_set_model_id, NULL);
3085 object_property_add(obj, "tsc-frequency", "int",
3086 x86_cpuid_get_tsc_freq,
3087 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3088 object_property_add(obj, "apic-id", "int",
3089 x86_cpuid_get_apic_id,
3090 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3091 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3092 x86_cpu_get_feature_words,
3093 NULL, NULL, (void *)env->features, NULL);
3094 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3095 x86_cpu_get_feature_words,
3096 NULL, NULL, (void *)cpu->filtered_features, NULL);
3098 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3100 #ifndef CONFIG_USER_ONLY
3101 /* Any code creating new X86CPU objects have to set apic-id explicitly */
/* Register a bool property for every known feature bit of every word. */
3105 for (w = 0; w < FEATURE_WORDS; w++) {
3108 for (bitnr = 0; bitnr < 32; bitnr++) {
3109 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3113 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3115 /* init various static tables used in TCG mode */
3116 if (tcg_enabled() && !inited) {
3122 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3124 X86CPU *cpu = X86_CPU(cs);
3126 return cpu->apic_id;
3129 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3131 X86CPU *cpu = X86_CPU(cs);
3133 return cpu->env.cr[0] & CR0_PG_MASK;
3136 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3138 X86CPU *cpu = X86_CPU(cs);
3140 cpu->env.eip = value;
3143 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3145 X86CPU *cpu = X86_CPU(cs);
3147 cpu->env.eip = tb->pc - tb->cs_base;
3150 static bool x86_cpu_has_work(CPUState *cs)
3152 X86CPU *cpu = X86_CPU(cs);
3153 CPUX86State *env = &cpu->env;
3155 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3156 CPU_INTERRUPT_POLL)) &&
3157 (env->eflags & IF_MASK)) ||
3158 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3159 CPU_INTERRUPT_INIT |
3160 CPU_INTERRUPT_SIPI |
3161 CPU_INTERRUPT_MCE)) ||
3162 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3163 !(env->hflags & HF_SMM_MASK));
/* qdev properties of X86CPU, settable via -cpu / -global. */
3166 static Property x86_cpu_properties[] = {
3167 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
/* Hyper-V enlightenments exposed to Windows guests. */
3168 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3169 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3170 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3171 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3172 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3173 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3174 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3175 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3176 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3177 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
/* Feature check/enforce behavior and CPUID level overrides. */
3178 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3179 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3180 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3181 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3182 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3183 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3184 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3185 DEFINE_PROP_END_OF_LIST()
/* Class init shared by all X86CPU subclasses: wire the DeviceClass
 * realize chain, properties, reset, and every CPUClass callback to the
 * x86 implementations. */
3188 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3190 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3191 CPUClass *cc = CPU_CLASS(oc);
3192 DeviceClass *dc = DEVICE_CLASS(oc);
/* Chain realize: remember the parent's handler, install ours. */
3194 xcc->parent_realize = dc->realize;
3195 dc->realize = x86_cpu_realizefn;
3196 dc->props = x86_cpu_properties;
3198 xcc->parent_reset = cc->reset;
3199 cc->reset = x86_cpu_reset;
3200 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3202 cc->class_by_name = x86_cpu_class_by_name;
3203 cc->parse_features = x86_cpu_parse_featurestr;
3204 cc->has_work = x86_cpu_has_work;
3205 cc->do_interrupt = x86_cpu_do_interrupt;
3206 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3207 cc->dump_state = x86_cpu_dump_state;
3208 cc->set_pc = x86_cpu_set_pc;
3209 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3210 cc->gdb_read_register = x86_cpu_gdb_read_register;
3211 cc->gdb_write_register = x86_cpu_gdb_write_register;
3212 cc->get_arch_id = x86_cpu_get_arch_id;
3213 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3214 #ifdef CONFIG_USER_ONLY
3215 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3217 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3218 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3219 cc->write_elf64_note = x86_cpu_write_elf64_note;
3220 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3221 cc->write_elf32_note = x86_cpu_write_elf32_note;
3222 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3223 cc->vmsd = &vmstate_x86_cpu;
3225 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3226 #ifndef CONFIG_USER_ONLY
3227 cc->debug_excp_handler = breakpoint_handler;
3229 cc->cpu_exec_enter = x86_cpu_exec_enter;
3230 cc->cpu_exec_exit = x86_cpu_exec_exit;
/*
3233 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3234 * object in cpus -> dangling pointer after final object_unref().
 */
3236 dc->cannot_destroy_with_object_finalize_yet = true;
/* Base QOM type for all x86 CPUs; concrete models register subtypes. */
3239 static const TypeInfo x86_cpu_type_info = {
3240 .name = TYPE_X86_CPU,
3242 .instance_size = sizeof(X86CPU),
3243 .instance_init = x86_cpu_initfn,
3245 .class_size = sizeof(X86CPUClass),
3246 .class_init = x86_cpu_common_class_init,
/* Register the base X86CPU type, one subtype per built-in CPU model,
 * and the KVM "host" CPU type. */
3249 static void x86_cpu_register_types(void)
3253 type_register_static(&x86_cpu_type_info);
3254 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3255 x86_register_cpudef_type(&builtin_x86_defs[i]);
3258 type_register_static(&host_x86_cpu_type_info);
3262 type_init(x86_cpu_register_types)