/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
23 #include "sysemu/kvm.h"
24 #include "sysemu/cpus.h"
27 #include "qemu/error-report.h"
28 #include "qemu/option.h"
29 #include "qemu/config-file.h"
30 #include "qapi/qmp/qerror.h"
32 #include "qapi-types.h"
33 #include "qapi-visit.h"
34 #include "qapi/visitor.h"
35 #include "sysemu/arch_init.h"
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #ifndef CONFIG_USER_ONLY
45 #include "exec/address-spaces.h"
46 #include "hw/xen/xen.h"
47 #include "hw/i386/apic_internal.h"
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d


/* CPUID Leaf 4 constants: */

/* EAX: cache type field (bits 4:0) */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

/* EAX: cache level, encoded in bits 7:5 */
#define CPUID_4_LEVEL(l)          ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a power-of-two way count (or full associativity) to the
 * 4-bit field defined in AMD's CPUID Specification; 0 is invalid.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a) : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* No L3 cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */

/* TLB definitions: */

/* L1 data TLB: */
#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

/* L1 instruction TLB: */
#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

/* L2 data TLB: */
#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

/* L2 instruction TLB: */
#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
162 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
163 uint32_t vendor2, uint32_t vendor3)
166 for (i = 0; i < 4; i++) {
167 dst[i] = vendor1 >> (8 * i);
168 dst[i + 4] = vendor2 >> (8 * i);
169 dst[i + 8] = vendor3 >> (8 * i);
171 dst[CPUID_VENDOR_SZ] = '\0';
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* CPUID[1].ECX feature names; "a|b" entries are accepted aliases. */
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};
/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[].  They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};
/* CPUID[8000_0001].ECX feature names. */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[C000_0001].EDX feature names (VIA/Centaur PadLock). */
static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* KVM paravirtual feature names (CPUID[KVM_CPUID_FEATURES].EAX).
 * Bits 0 and 3 are both named "kvmclock": they correspond to the old and
 * new kvmclock MSR interfaces respectively.
 */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[8000_000A].EDX SVM feature names. */
static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean",  "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[EAX=7,ECX=0].EBX feature names. */
static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
    "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
    "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
};
/* CPUID[EAX=7,ECX=0].ECX feature names. */
static const char *cpuid_7_0_ecx_feature_name[] = {
    NULL, NULL, NULL, "pku",
    "ospke", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[8000_0007].EDX (Advanced Power Management) feature names. */
static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[EAX=0xd,ECX=1].EAX (XSAVE sub-feature) names. */
static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[6].EAX (thermal/power management) feature names. */
static const char *cpuid_6_feature_name[] = {
    NULL, NULL, "arat", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* Feature flags the TCG emulator can actually implement: */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

/* 64-bit-only bits are advertised only when the target supports long mode. */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Description of one CPUID feature word: where it comes from in CPUID
 * space, its bit names, and which bits TCG supports / cannot migrate.
 */
typedef struct FeatureWordInfo {
    const char **feat_names;     /* 32 bit names, NULL for unknown bits */
    uint32_t cpuid_eax;          /* Input EAX for CPUID */
    bool cpuid_needs_ecx;        /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;          /* Input ECX value for CPUID */
    int cpuid_reg;               /* output register (R_* constant) */
    uint32_t tcg_features;       /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
381 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
383 .feat_names = feature_name,
384 .cpuid_eax = 1, .cpuid_reg = R_EDX,
385 .tcg_features = TCG_FEATURES,
388 .feat_names = ext_feature_name,
389 .cpuid_eax = 1, .cpuid_reg = R_ECX,
390 .tcg_features = TCG_EXT_FEATURES,
392 [FEAT_8000_0001_EDX] = {
393 .feat_names = ext2_feature_name,
394 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
395 .tcg_features = TCG_EXT2_FEATURES,
397 [FEAT_8000_0001_ECX] = {
398 .feat_names = ext3_feature_name,
399 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
400 .tcg_features = TCG_EXT3_FEATURES,
402 [FEAT_C000_0001_EDX] = {
403 .feat_names = ext4_feature_name,
404 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
405 .tcg_features = TCG_EXT4_FEATURES,
408 .feat_names = kvm_feature_name,
409 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
410 .tcg_features = TCG_KVM_FEATURES,
413 .feat_names = svm_feature_name,
414 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
415 .tcg_features = TCG_SVM_FEATURES,
418 .feat_names = cpuid_7_0_ebx_feature_name,
420 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
422 .tcg_features = TCG_7_0_EBX_FEATURES,
425 .feat_names = cpuid_7_0_ecx_feature_name,
427 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
429 .tcg_features = TCG_7_0_ECX_FEATURES,
431 [FEAT_8000_0007_EDX] = {
432 .feat_names = cpuid_apm_edx_feature_name,
433 .cpuid_eax = 0x80000007,
435 .tcg_features = TCG_APM_FEATURES,
436 .unmigratable_flags = CPUID_APM_INVTSC,
439 .feat_names = cpuid_xsave_feature_name,
441 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
443 .tcg_features = TCG_XSAVE_FEATURES,
446 .feat_names = cpuid_6_feature_name,
447 .cpuid_eax = 6, .cpuid_reg = R_EAX,
448 .tcg_features = TCG_6_EAX_FEATURES,
452 typedef struct X86RegisterInfo32 {
453 /* Name of register */
455 /* QAPI enum value register */
456 X86CPURegister32 qapi_enum;
459 #define REGISTER(reg) \
460 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
461 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
473 const ExtSaveArea x86_ext_save_areas[] = {
475 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
476 .offset = 0x240, .size = 0x100 },
477 [XSTATE_BNDREGS_BIT] =
478 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
479 .offset = 0x3c0, .size = 0x40 },
480 [XSTATE_BNDCSR_BIT] =
481 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
482 .offset = 0x400, .size = 0x40 },
483 [XSTATE_OPMASK_BIT] =
484 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
485 .offset = 0x440, .size = 0x40 },
486 [XSTATE_ZMM_Hi256_BIT] =
487 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
488 .offset = 0x480, .size = 0x200 },
489 [XSTATE_Hi16_ZMM_BIT] =
490 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
491 .offset = 0x680, .size = 0x400 },
493 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
494 .offset = 0xA80, .size = 0x8 },
497 const char *get_register_name_32(unsigned int reg)
499 if (reg >= CPU_NB_REGS32) {
502 return x86_reg_info_32[reg].name;
506 * Returns the set of feature flags that are supported and migratable by
507 * QEMU, for a given FeatureWord.
509 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
511 FeatureWordInfo *wi = &feature_word_info[w];
515 for (i = 0; i < 32; i++) {
516 uint32_t f = 1U << i;
517 /* If the feature name is unknown, it is not supported by QEMU yet */
518 if (!wi->feat_names[i]) {
521 /* Skip features known to QEMU, but explicitly marked as unmigratable */
522 if (wi->unmigratable_flags & f) {
530 void host_cpuid(uint32_t function, uint32_t count,
531 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
537 : "=a"(vec[0]), "=b"(vec[1]),
538 "=c"(vec[2]), "=d"(vec[3])
539 : "0"(function), "c"(count) : "cc");
540 #elif defined(__i386__)
541 asm volatile("pusha \n\t"
543 "mov %%eax, 0(%2) \n\t"
544 "mov %%ebx, 4(%2) \n\t"
545 "mov %%ecx, 8(%2) \n\t"
546 "mov %%edx, 12(%2) \n\t"
548 : : "a"(function), "c"(count), "S"(vec)
/* True for non-NUL characters outside the printable ASCII range
 * (control chars, space, and anything above '~') — used as a separator
 * test when parsing feature strings.  NUL itself is NOT "white".
 */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
/* general substring compare of *[s1..e1) and *[s2..e2).  sx is start of
 * a substring. ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    for (;;) {
        if (!*s1 || !*s2 || *s1 != *s2)
            return (*s1 - *s2);
        ++s1, ++s2;
        if (s1 == e1 && s2 == e2)
            return (0);
        else if (s1 == e1)
            return (*s2);
        else if (s2 == e2)
            return (*s1);
    }
}
/* compare *[s..e) to *altstr.  *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right.  Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *p, *q;

    for (q = p = altstr; ; ) {
        while (*p && *p != '|')
            ++p;
        if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
            return 0;
        if (!*p)
            return 1;
        else
            q = ++p;
    }
}
/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    uint32_t mask;
    const char **ppc;
    bool found = false;

    /* Walk all 32 bit positions; mask overflows to 0 after bit 31 */
    for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
        if (*ppc && !altcmp(s, e, *ppc)) {
            *pval |= mask;
            found = true;
        }
    }
    return found;
}
627 static void add_flagname_to_bitmaps(const char *flagname,
628 FeatureWordArray words,
632 for (w = 0; w < FEATURE_WORDS; w++) {
633 FeatureWordInfo *wi = &feature_word_info[w];
634 if (wi->feat_names &&
635 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
639 if (w == FEATURE_WORDS) {
640 error_setg(errp, "CPU feature %s not found", flagname);
644 /* CPU class name definitions: */
646 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
647 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
649 /* Return type name for a given CPU model name
650 * Caller is responsible for freeing the returned string.
652 static char *x86_cpu_type_name(const char *model_name)
654 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
657 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
662 if (cpu_model == NULL) {
666 typename = x86_cpu_type_name(cpu_model);
667 oc = object_class_by_name(typename);
672 struct X86CPUDefinition {
677 /* vendor is zero-terminated, 12 character ASCII string */
678 char vendor[CPUID_VENDOR_SZ + 1];
682 FeatureWordArray features;
686 static X86CPUDefinition builtin_x86_defs[] = {
690 .vendor = CPUID_VENDOR_AMD,
694 .features[FEAT_1_EDX] =
696 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
698 .features[FEAT_1_ECX] =
699 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
700 .features[FEAT_8000_0001_EDX] =
701 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
702 .features[FEAT_8000_0001_ECX] =
703 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
704 .xlevel = 0x8000000A,
709 .vendor = CPUID_VENDOR_AMD,
713 /* Missing: CPUID_HT */
714 .features[FEAT_1_EDX] =
716 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
717 CPUID_PSE36 | CPUID_VME,
718 .features[FEAT_1_ECX] =
719 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
721 .features[FEAT_8000_0001_EDX] =
722 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
723 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
724 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
725 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
727 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
728 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
729 .features[FEAT_8000_0001_ECX] =
730 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
731 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
732 /* Missing: CPUID_SVM_LBRV */
733 .features[FEAT_SVM] =
735 .xlevel = 0x8000001A,
736 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
741 .vendor = CPUID_VENDOR_INTEL,
745 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
746 .features[FEAT_1_EDX] =
748 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
749 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
750 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
751 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
752 .features[FEAT_1_ECX] =
753 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
755 .features[FEAT_8000_0001_EDX] =
756 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
757 .features[FEAT_8000_0001_ECX] =
759 .xlevel = 0x80000008,
760 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
765 .vendor = CPUID_VENDOR_INTEL,
769 /* Missing: CPUID_HT */
770 .features[FEAT_1_EDX] =
771 PPRO_FEATURES | CPUID_VME |
772 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
774 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
775 .features[FEAT_1_ECX] =
776 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
777 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
778 .features[FEAT_8000_0001_EDX] =
779 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
780 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
781 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
782 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
783 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
784 .features[FEAT_8000_0001_ECX] =
786 .xlevel = 0x80000008,
787 .model_id = "Common KVM processor"
792 .vendor = CPUID_VENDOR_INTEL,
796 .features[FEAT_1_EDX] =
798 .features[FEAT_1_ECX] =
800 .xlevel = 0x80000004,
805 .vendor = CPUID_VENDOR_INTEL,
809 .features[FEAT_1_EDX] =
810 PPRO_FEATURES | CPUID_VME |
811 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
812 .features[FEAT_1_ECX] =
814 .features[FEAT_8000_0001_ECX] =
816 .xlevel = 0x80000008,
817 .model_id = "Common 32-bit KVM processor"
822 .vendor = CPUID_VENDOR_INTEL,
826 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
827 .features[FEAT_1_EDX] =
828 PPRO_FEATURES | CPUID_VME |
829 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
831 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
832 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
833 .features[FEAT_1_ECX] =
834 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
835 .features[FEAT_8000_0001_EDX] =
837 .xlevel = 0x80000008,
838 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
843 .vendor = CPUID_VENDOR_INTEL,
847 .features[FEAT_1_EDX] =
854 .vendor = CPUID_VENDOR_INTEL,
858 .features[FEAT_1_EDX] =
865 .vendor = CPUID_VENDOR_INTEL,
869 .features[FEAT_1_EDX] =
876 .vendor = CPUID_VENDOR_INTEL,
880 .features[FEAT_1_EDX] =
887 .vendor = CPUID_VENDOR_AMD,
891 .features[FEAT_1_EDX] =
892 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
894 .features[FEAT_8000_0001_EDX] =
895 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
896 .xlevel = 0x80000008,
901 .vendor = CPUID_VENDOR_INTEL,
905 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
906 .features[FEAT_1_EDX] =
908 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
909 CPUID_ACPI | CPUID_SS,
910 /* Some CPUs got no CPUID_SEP */
911 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
913 .features[FEAT_1_ECX] =
914 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
916 .features[FEAT_8000_0001_EDX] =
918 .features[FEAT_8000_0001_ECX] =
920 .xlevel = 0x80000008,
921 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
926 .vendor = CPUID_VENDOR_INTEL,
930 .features[FEAT_1_EDX] =
931 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
932 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
933 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
934 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
935 CPUID_DE | CPUID_FP87,
936 .features[FEAT_1_ECX] =
937 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
938 .features[FEAT_8000_0001_EDX] =
939 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
940 .features[FEAT_8000_0001_ECX] =
942 .xlevel = 0x80000008,
943 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
948 .vendor = CPUID_VENDOR_INTEL,
952 .features[FEAT_1_EDX] =
953 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
954 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
955 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
956 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
957 CPUID_DE | CPUID_FP87,
958 .features[FEAT_1_ECX] =
959 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
961 .features[FEAT_8000_0001_EDX] =
962 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
963 .features[FEAT_8000_0001_ECX] =
965 .xlevel = 0x80000008,
966 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
971 .vendor = CPUID_VENDOR_INTEL,
975 .features[FEAT_1_EDX] =
976 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
977 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
978 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
979 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
980 CPUID_DE | CPUID_FP87,
981 .features[FEAT_1_ECX] =
982 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
983 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
984 .features[FEAT_8000_0001_EDX] =
985 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
986 .features[FEAT_8000_0001_ECX] =
988 .xlevel = 0x80000008,
989 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
994 .vendor = CPUID_VENDOR_INTEL,
998 .features[FEAT_1_EDX] =
999 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1000 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1001 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1002 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1003 CPUID_DE | CPUID_FP87,
1004 .features[FEAT_1_ECX] =
1005 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1006 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1007 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1008 .features[FEAT_8000_0001_EDX] =
1009 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1010 .features[FEAT_8000_0001_ECX] =
1012 .features[FEAT_6_EAX] =
1014 .xlevel = 0x80000008,
1015 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1018 .name = "SandyBridge",
1020 .vendor = CPUID_VENDOR_INTEL,
1024 .features[FEAT_1_EDX] =
1025 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1026 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1027 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1028 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1029 CPUID_DE | CPUID_FP87,
1030 .features[FEAT_1_ECX] =
1031 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1032 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1033 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1034 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1036 .features[FEAT_8000_0001_EDX] =
1037 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1039 .features[FEAT_8000_0001_ECX] =
1041 .features[FEAT_XSAVE] =
1042 CPUID_XSAVE_XSAVEOPT,
1043 .features[FEAT_6_EAX] =
1045 .xlevel = 0x80000008,
1046 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1049 .name = "IvyBridge",
1051 .vendor = CPUID_VENDOR_INTEL,
1055 .features[FEAT_1_EDX] =
1056 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1057 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1058 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1059 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1060 CPUID_DE | CPUID_FP87,
1061 .features[FEAT_1_ECX] =
1062 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1063 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1064 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1065 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1066 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1067 .features[FEAT_7_0_EBX] =
1068 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1070 .features[FEAT_8000_0001_EDX] =
1071 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1073 .features[FEAT_8000_0001_ECX] =
1075 .features[FEAT_XSAVE] =
1076 CPUID_XSAVE_XSAVEOPT,
1077 .features[FEAT_6_EAX] =
1079 .xlevel = 0x80000008,
1080 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1083 .name = "Haswell-noTSX",
1085 .vendor = CPUID_VENDOR_INTEL,
1089 .features[FEAT_1_EDX] =
1090 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1091 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1092 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1093 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1094 CPUID_DE | CPUID_FP87,
1095 .features[FEAT_1_ECX] =
1096 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1097 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1098 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1099 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1100 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1101 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1102 .features[FEAT_8000_0001_EDX] =
1103 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1105 .features[FEAT_8000_0001_ECX] =
1106 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1107 .features[FEAT_7_0_EBX] =
1108 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1109 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1110 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1111 .features[FEAT_XSAVE] =
1112 CPUID_XSAVE_XSAVEOPT,
1113 .features[FEAT_6_EAX] =
1115 .xlevel = 0x80000008,
1116 .model_id = "Intel Core Processor (Haswell, no TSX)",
1120 .vendor = CPUID_VENDOR_INTEL,
1124 .features[FEAT_1_EDX] =
1125 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1126 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1127 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1128 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1129 CPUID_DE | CPUID_FP87,
1130 .features[FEAT_1_ECX] =
1131 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1132 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1133 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1134 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1135 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1136 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1137 .features[FEAT_8000_0001_EDX] =
1138 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1140 .features[FEAT_8000_0001_ECX] =
1141 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1142 .features[FEAT_7_0_EBX] =
1143 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1144 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1145 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1147 .features[FEAT_XSAVE] =
1148 CPUID_XSAVE_XSAVEOPT,
1149 .features[FEAT_6_EAX] =
1151 .xlevel = 0x80000008,
1152 .model_id = "Intel Core Processor (Haswell)",
1155 .name = "Broadwell-noTSX",
1157 .vendor = CPUID_VENDOR_INTEL,
1161 .features[FEAT_1_EDX] =
1162 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1163 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1164 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1165 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1166 CPUID_DE | CPUID_FP87,
1167 .features[FEAT_1_ECX] =
1168 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1169 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1170 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1171 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1172 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1173 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1174 .features[FEAT_8000_0001_EDX] =
1175 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1177 .features[FEAT_8000_0001_ECX] =
1178 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1179 .features[FEAT_7_0_EBX] =
1180 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1181 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1182 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1183 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1185 .features[FEAT_XSAVE] =
1186 CPUID_XSAVE_XSAVEOPT,
1187 .features[FEAT_6_EAX] =
1189 .xlevel = 0x80000008,
1190 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1193 .name = "Broadwell",
1195 .vendor = CPUID_VENDOR_INTEL,
1199 .features[FEAT_1_EDX] =
1200 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1201 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1202 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1203 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1204 CPUID_DE | CPUID_FP87,
1205 .features[FEAT_1_ECX] =
1206 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1207 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1208 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1209 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1210 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1211 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1212 .features[FEAT_8000_0001_EDX] =
1213 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1215 .features[FEAT_8000_0001_ECX] =
1216 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1217 .features[FEAT_7_0_EBX] =
1218 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1219 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1220 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1221 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1223 .features[FEAT_XSAVE] =
1224 CPUID_XSAVE_XSAVEOPT,
1225 .features[FEAT_6_EAX] =
1227 .xlevel = 0x80000008,
1228 .model_id = "Intel Core Processor (Broadwell)",
1231 .name = "Opteron_G1",
1233 .vendor = CPUID_VENDOR_AMD,
1237 .features[FEAT_1_EDX] =
1238 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1239 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1240 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1241 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1242 CPUID_DE | CPUID_FP87,
1243 .features[FEAT_1_ECX] =
1245 .features[FEAT_8000_0001_EDX] =
1246 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1247 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1248 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1249 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1250 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1251 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1252 .xlevel = 0x80000008,
1253 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1256 .name = "Opteron_G2",
1258 .vendor = CPUID_VENDOR_AMD,
1262 .features[FEAT_1_EDX] =
1263 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1264 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1265 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1266 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1267 CPUID_DE | CPUID_FP87,
1268 .features[FEAT_1_ECX] =
1269 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1270 /* Missing: CPUID_EXT2_RDTSCP */
1271 .features[FEAT_8000_0001_EDX] =
1272 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1273 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1274 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1275 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1276 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1277 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1278 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1279 .features[FEAT_8000_0001_ECX] =
1280 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1281 .xlevel = 0x80000008,
1282 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1285 .name = "Opteron_G3",
1287 .vendor = CPUID_VENDOR_AMD,
1291 .features[FEAT_1_EDX] =
1292 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1293 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1294 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1295 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1296 CPUID_DE | CPUID_FP87,
1297 .features[FEAT_1_ECX] =
1298 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1300 /* Missing: CPUID_EXT2_RDTSCP */
1301 .features[FEAT_8000_0001_EDX] =
1302 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1303 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1304 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1305 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1306 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1307 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1308 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1309 .features[FEAT_8000_0001_ECX] =
1310 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1311 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1312 .xlevel = 0x80000008,
1313 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1316 .name = "Opteron_G4",
1318 .vendor = CPUID_VENDOR_AMD,
1322 .features[FEAT_1_EDX] =
1323 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1324 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1325 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1326 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1327 CPUID_DE | CPUID_FP87,
1328 .features[FEAT_1_ECX] =
1329 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1330 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1331 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1333 /* Missing: CPUID_EXT2_RDTSCP */
1334 .features[FEAT_8000_0001_EDX] =
1336 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1337 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1338 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1339 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1340 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1341 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1342 .features[FEAT_8000_0001_ECX] =
1343 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1344 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1345 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1348 .xlevel = 0x8000001A,
1349 .model_id = "AMD Opteron 62xx class CPU",
1352 .name = "Opteron_G5",
1354 .vendor = CPUID_VENDOR_AMD,
1358 .features[FEAT_1_EDX] =
1359 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1360 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1361 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1362 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1363 CPUID_DE | CPUID_FP87,
1364 .features[FEAT_1_ECX] =
1365 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1366 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1367 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1368 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1369 /* Missing: CPUID_EXT2_RDTSCP */
1370 .features[FEAT_8000_0001_EDX] =
1372 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1373 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1374 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1375 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1376 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1377 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1378 .features[FEAT_8000_0001_ECX] =
1379 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1380 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1381 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1384 .xlevel = 0x8000001A,
1385 .model_id = "AMD Opteron 63xx class CPU",
/* NOTE(review): this extract is line-sampled; the struct terminator
 * "} PropValue;" is among the missing lines. */
/* Name/value pair for a qdev property, used in NULL-terminated tables. */
1389 typedef struct PropValue {
1390     const char *prop, *value;
1393 /* KVM-specific features that are automatically added/removed
1394 * from all CPU models when KVM is enabled.
/* Default property overrides applied to every CPU model when running
 * under KVM (see x86_cpu_apply_props).  Table is NULL-terminated; the
 * terminator and closing brace are among the sampled-out lines. */
1396 static PropValue kvm_default_props[] = {
1397     { "kvmclock", "on" },
1398     { "kvm-nopiodelay", "on" },
1399     { "kvm-asyncpf", "on" },
1400     { "kvm-steal-time", "on" },
1401     { "kvm-pv-eoi", "on" },
1402     { "kvmclock-stable-bit", "on" },
1405     { "monitor", "off" },
/* Override the default 'value' for a property in kvm_default_props.
 * NOTE(review): body is incomplete in this extract (assignment, abort
 * path and closing brace are missing lines). */
1410 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1413     for (pv = kvm_default_props; pv->prop; pv++) {
1414         if (!strcmp(pv->prop, prop)) {
1420     /* It is valid to call this function only for properties that
1421      * are already present in the kvm_default_props table.
1426 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1427 bool migratable_only);
/* Fill 'str' (48 bytes) with the host CPU model string by reading
 * CPUID leaves 0x80000002..0x80000004; each leaf yields 16 bytes in
 * EAX/EBX/ECX/EDX order.  NOTE(review): return statement and closing
 * brace are sampled-out lines. */
1432 static int cpu_x86_fill_model_id(char *str)
1433     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1436     for (i = 0; i < 3; i++) {
1437         host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1438         memcpy(str + i * 16 +  0, &eax, 4);
1439         memcpy(str + i * 16 +  4, &ebx, 4);
1440         memcpy(str + i * 16 +  8, &ecx, 4);
1441         memcpy(str + i * 16 + 12, &edx, 4);
1446 static X86CPUDefinition host_cpudef;
/* qdev properties specific to the "host" CPU model. */
1448 static Property host_x86_cpu_properties[] = {
1449     DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1450     DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1451     DEFINE_PROP_END_OF_LIST()
1454 /* class_init for the "host" CPU model
1456 * This function may be called before KVM is initialized.
/* class_init for the "host" CPU model: probe the host via CPUID and
 * fill host_cpudef (vendor, family/model/stepping, model-id).  Runs
 * before KVM init, so feature words are deferred to instance_init. */
1458 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1460     DeviceClass *dc = DEVICE_CLASS(oc);
1461     X86CPUClass *xcc = X86_CPU_CLASS(oc);
1462     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1464     xcc->kvm_required = true;
1466     host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1467     x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1469     host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
/* Family = base family + extended family; model = base | extended<<4. */
1470     host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1471     host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1472     host_cpudef.stepping = eax & 0x0F;
1474     cpu_x86_fill_model_id(host_cpudef.model_id);
1476     xcc->cpu_def = &host_cpudef;
1478     /* level, xlevel, xlevel2, and the feature words are initialized on
1479      * instance_init, because they require KVM to be initialized.
1482     dc->props = host_x86_cpu_properties;
1483     /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1484     dc->cannot_destroy_with_object_finalize_yet = true;
/* instance_init for the "host" CPU model: query KVM for the supported
 * CPUID levels and enable the PMU.  Requires KVM to be initialized. */
1487 static void host_x86_cpu_initfn(Object *obj)
1489     X86CPU *cpu = X86_CPU(obj);
1490     CPUX86State *env = &cpu->env;
1491     KVMState *s = kvm_state;
1493     assert(kvm_enabled());
1495     /* We can't fill the features array here because we don't know yet if
1496      * "migratable" is true or false.
1498     cpu->host_features = true;
1500     env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1501     env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1502     env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1504     object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
/* QOM type registration for the "host" CPU model. */
1507 static const TypeInfo host_x86_cpu_type_info = {
1508     .name = X86_CPU_TYPE_NAME("host"),
1509     .parent = TYPE_X86_CPU,
1510     .instance_init = host_x86_cpu_initfn,
1511     .class_init = host_x86_cpu_class_init,
/* Warn on stderr about each bit in 'mask' that was requested for
 * feature word 'w' but is not available on the accelerator. */
1516 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1518     FeatureWordInfo *f = &feature_word_info[w];
1521     for (i = 0; i < 32; ++i) {
1522         if ((1UL << i) & mask) {
1523             const char *reg = get_register_name_32(f->cpuid_reg);
1525             fprintf(stderr, "warning: %s doesn't support requested feature: "
1526                     "CPUID.%02XH:%s%s%s [bit %d]\n",
1527                     kvm_enabled() ? "host" : "TCG",
1529                     f->feat_names[i] ? "." : "",
1530                     f->feat_names[i] ? f->feat_names[i] : "", i);
/* QOM getter for "family": base family plus extended family field of
 * cpuid_version (extended applies when base == 0xf; the guarding
 * condition is among the sampled-out lines). */
1535 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1536                                          const char *name, void *opaque,
1539     X86CPU *cpu = X86_CPU(obj);
1540     CPUX86State *env = &cpu->env;
1543     value = (env->cpuid_version >> 8) & 0xf;
1545         value += (env->cpuid_version >> 20) & 0xff;
1547     visit_type_int(v, name, &value, errp);
/* QOM setter for "family": validates range 0..(0xff + 0xf) and encodes
 * the value into the base/extended family fields of cpuid_version. */
1550 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1551                                          const char *name, void *opaque,
1554     X86CPU *cpu = X86_CPU(obj);
1555     CPUX86State *env = &cpu->env;
1556     const int64_t min = 0;
1557     const int64_t max = 0xff + 0xf;
1558     Error *local_err = NULL;
1561     visit_type_int(v, name, &value, &local_err);
1563         error_propagate(errp, local_err);
1566     if (value < min || value > max) {
1567         error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1568                    name ? name : "null", value, min, max);
/* Values > 0xf spill into the extended-family field (bits 20..27). */
1572     env->cpuid_version &= ~0xff00f00;
1574         env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1576         env->cpuid_version |= value << 8;
/* QOM getter for "model": base model (bits 4..7) plus extended model
 * (bits 16..19) of cpuid_version. */
1580 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1581                                         const char *name, void *opaque,
1584     X86CPU *cpu = X86_CPU(obj);
1585     CPUX86State *env = &cpu->env;
1588     value = (env->cpuid_version >> 4) & 0xf;
1589     value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1590     visit_type_int(v, name, &value, errp);
/* QOM setter for "model": validates 0..0xff and splits the value into
 * the base (bits 4..7) and extended (bits 16..19) model fields. */
1593 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1594                                         const char *name, void *opaque,
1597     X86CPU *cpu = X86_CPU(obj);
1598     CPUX86State *env = &cpu->env;
1599     const int64_t min = 0;
1600     const int64_t max = 0xff;
1601     Error *local_err = NULL;
1604     visit_type_int(v, name, &value, &local_err);
1606         error_propagate(errp, local_err);
1609     if (value < min || value > max) {
1610         error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1611                    name ? name : "null", value, min, max);
1615     env->cpuid_version &= ~0xf00f0;
1616     env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
/* QOM getter for "stepping": low nibble of cpuid_version. */
1619 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1620                                            const char *name, void *opaque,
1623     X86CPU *cpu = X86_CPU(obj);
1624     CPUX86State *env = &cpu->env;
1627     value = env->cpuid_version & 0xf;
1628     visit_type_int(v, name, &value, errp);
/* QOM setter for "stepping": validates 0..0xf and stores it in the low
 * nibble of cpuid_version. */
1631 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1632                                            const char *name, void *opaque,
1635     X86CPU *cpu = X86_CPU(obj);
1636     CPUX86State *env = &cpu->env;
1637     const int64_t min = 0;
1638     const int64_t max = 0xf;
1639     Error *local_err = NULL;
1642     visit_type_int(v, name, &value, &local_err);
1644         error_propagate(errp, local_err);
1647     if (value < min || value > max) {
1648         error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1649                    name ? name : "null", value, min, max);
1653     env->cpuid_version &= ~0xf;
1654     env->cpuid_version |= value & 0xf;
/* QOM getter for "vendor": returns a newly-allocated 12-char vendor
 * string decoded from the three cpuid_vendor words (caller frees). */
1657 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1659     X86CPU *cpu = X86_CPU(obj);
1660     CPUX86State *env = &cpu->env;
1663     value = g_malloc(CPUID_VENDOR_SZ + 1);
1664     x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1665                              env->cpuid_vendor3);
/* QOM setter for "vendor": requires exactly CPUID_VENDOR_SZ (12)
 * characters and packs them little-endian, 4 bytes per vendor word. */
1669 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1672     X86CPU *cpu = X86_CPU(obj);
1673     CPUX86State *env = &cpu->env;
1676     if (strlen(value) != CPUID_VENDOR_SZ) {
1677         error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1681     env->cpuid_vendor1 = 0;
1682     env->cpuid_vendor2 = 0;
1683     env->cpuid_vendor3 = 0;
1684     for (i = 0; i < 4; i++) {
1685         env->cpuid_vendor1 |= ((uint8_t)value[i    ]) << (8 * i);
1686         env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1687         env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
/* QOM getter for "model-id": decodes the 48-byte model string from the
 * cpuid_model word array into a newly-allocated buffer (caller frees). */
1691 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1693     X86CPU *cpu = X86_CPU(obj);
1694     CPUX86State *env = &cpu->env;
1698     value = g_malloc(48 + 1);
1699     for (i = 0; i < 48; i++) {
1700         value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
/* QOM setter for "model-id": packs up to 48 characters of 'model_id'
 * into the cpuid_model word array, 4 bytes per word; the tail is
 * zero-filled by the preceding memset (space-padding of short strings
 * appears to be handled by a sampled-out line — TODO confirm). */
1706 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1709     X86CPU *cpu = X86_CPU(obj);
1710     CPUX86State *env = &cpu->env;
1713     if (model_id == NULL) {
1716     len = strlen(model_id);
1717     memset(env->cpuid_model, 0, 48);
1718     for (i = 0; i < 48; i++) {
1722             c = (uint8_t)model_id[i];
1724         env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
/* QOM getter for "tsc-frequency": reports env.tsc_khz scaled to Hz. */
1728 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1729                                    void *opaque, Error **errp)
1731     X86CPU *cpu = X86_CPU(obj);
1734     value = cpu->env.tsc_khz * 1000;
1735     visit_type_int(v, name, &value, errp);
/* QOM setter for "tsc-frequency": validates a non-negative Hz value and
 * stores it in kHz in both tsc_khz and user_tsc_khz. */
1738 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1739                                    void *opaque, Error **errp)
1741     X86CPU *cpu = X86_CPU(obj);
1742     const int64_t min = 0;
1743     const int64_t max = INT64_MAX;
1744     Error *local_err = NULL;
1747     visit_type_int(v, name, &value, &local_err);
1749         error_propagate(errp, local_err);
1752     if (value < min || value > max) {
1753         error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1754                    name ? name : "null", value, min, max);
1758     cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
/* QOM getter for "apic-id". */
1761 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1762                                   void *opaque, Error **errp)
1764     X86CPU *cpu = X86_CPU(obj);
1765     int64_t value = cpu->apic_id;
1767     visit_type_int(v, name, &value, errp);
/* QOM setter for "apic-id": rejected after realize, range-checked to
 * 0..UINT32_MAX, and must not collide with an existing CPU's APIC ID. */
1770 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1771                                   void *opaque, Error **errp)
1773     X86CPU *cpu = X86_CPU(obj);
1774     DeviceState *dev = DEVICE(obj);
1775     const int64_t min = 0;
1776     const int64_t max = UINT32_MAX;
1777     Error *error = NULL;
1780     if (dev->realized) {
1781         error_setg(errp, "Attempt to set property '%s' on '%s' after "
1782                    "it was realized", name, object_get_typename(obj));
1786     visit_type_int(v, name, &value, &error);
1788         error_propagate(errp, error);
1791     if (value < min || value > max) {
1792         error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1793                    " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1794                    object_get_typename(obj), name, value, min, max);
1798     if ((value != cpu->apic_id) && cpu_exists(value)) {
1799         error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1802     cpu->apic_id = value;
1805 /* Generic getter for "feature-words" and "filtered-features" properties */
/* Generic getter for "feature-words" and "filtered-features": builds a
 * stack-allocated X86CPUFeatureWordInfoList describing every feature
 * word in 'array' (passed via 'opaque') and visits it.  The list nodes
 * live on the stack, so nothing needs freeing after the visit. */
1806 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1807                                       const char *name, void *opaque,
1810     uint32_t *array = (uint32_t *)opaque;
1813     X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1814     X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1815     X86CPUFeatureWordInfoList *list = NULL;
1817     for (w = 0; w < FEATURE_WORDS; w++) {
1818         FeatureWordInfo *wi = &feature_word_info[w];
1819         X86CPUFeatureWordInfo *qwi = &word_infos[w];
1820         qwi->cpuid_input_eax = wi->cpuid_eax;
1821         qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1822         qwi->cpuid_input_ecx = wi->cpuid_ecx;
1823         qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1824         qwi->features = array[w];
1826         /* List will be in reverse order, but order shouldn't matter */
1827         list_entries[w].next = list;
1828         list_entries[w].value = &word_infos[w];
1829         list = &list_entries[w];
1832     visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
1833     error_propagate(errp, err);
/* QOM getter for "hv-spinlocks". */
1836 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1837                                  void *opaque, Error **errp)
1839     X86CPU *cpu = X86_CPU(obj);
1840     int64_t value = cpu->hyperv_spinlock_attempts;
1842     visit_type_int(v, name, &value, errp);
/* QOM setter for "hv-spinlocks": Hyper-V requires a minimum retry count
 * of 0xFFF, hence the unusual lower bound. */
1845 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1846                                  void *opaque, Error **errp)
1848     const int64_t min = 0xFFF;
1849     const int64_t max = UINT_MAX;
1850     X86CPU *cpu = X86_CPU(obj);
1854     visit_type_int(v, name, &value, &err);
1856         error_propagate(errp, err);
1860     if (value < min || value > max) {
1861         error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1862                    " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1863                    object_get_typename(obj), name ? name : "null",
1867     cpu->hyperv_spinlock_attempts = value;
/* qdev property type wiring "hv-spinlocks" to its getter/setter. */
1870 static PropertyInfo qdev_prop_spinlocks = {
1872     .get = x86_get_hv_spinlocks,
1873     .set = x86_set_hv_spinlocks,
1876 /* Convert all '_' in a feature string option name to '-', to make feature
1877 * name conform to QOM property naming rule, which uses '-' instead of '_'.
/* Replace every '_' in 's' with '-' in place, so legacy feature names
 * match QOM property naming.  NOTE(review): the '*s = '-';' assignment
 * inside the loop is a sampled-out line. */
1879 static inline void feat2prop(char *s)
1881     while ((s = strchr(s, '_'))) {
1886 /* Parse "+feature,-feature,feature=foo" CPU feature string
/* Parse a "+feature,-feature,feature=foo" CPU feature string and apply
 * it to 'cs':
 *  - "+f"/"-f" accumulate into plus/minus feature bitmaps (legacy form);
 *  - "f=v" sets a QOM property, with special-case fixups for "xlevel"
 *    (auto-adds 0x80000000), "tsc-freq" (size-suffixed parse) and
 *    "hv-spinlocks" (minimum 0xFFF);
 *  - bare "f" sets the property to "on".
 * Finally, when host_features is set, the feature words are seeded from
 * the accelerator's supported set, and the plus/minus bitmaps are
 * applied on top.  Note strtok() mutates 'features' and uses static
 * state, so this parser is not reentrant. */
1888 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1891     X86CPU *cpu = X86_CPU(cs);
1892     char *featurestr; /* Single 'key=value" string being parsed */
1894     /* Features to be added */
1895     FeatureWordArray plus_features = { 0 };
1896     /* Features to be removed */
1897     FeatureWordArray minus_features = { 0 };
1899     CPUX86State *env = &cpu->env;
1900     Error *local_err = NULL;
1902     featurestr = features ? strtok(features, ",") : NULL;
1904     while (featurestr) {
1906         if (featurestr[0] == '+') {
1907             add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1908         } else if (featurestr[0] == '-') {
1909             add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1910         } else if ((val = strchr(featurestr, '='))) {
1912             feat2prop(featurestr);
1913             if (!strcmp(featurestr, "xlevel")) {
1917                 numvalue = strtoul(val, &err, 0);
1918                 if (!*val || *err) {
1919                     error_setg(errp, "bad numerical value %s", val);
1922                 if (numvalue < 0x80000000) {
1923                     error_report("xlevel value shall always be >= 0x80000000"
1924                                  ", fixup will be removed in future versions");
1925                     numvalue += 0x80000000;
1927                 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1928                 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1929             } else if (!strcmp(featurestr, "tsc-freq")) {
1934                 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1935                                                QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1936                 if (tsc_freq < 0 || *err) {
1937                     error_setg(errp, "bad numerical value %s", val);
1940                 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1941                 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1943             } else if (!strcmp(featurestr, "hv-spinlocks")) {
1945                 const int min = 0xFFF;
1947                 numvalue = strtoul(val, &err, 0);
1948                 if (!*val || *err) {
1949                     error_setg(errp, "bad numerical value %s", val);
1952                 if (numvalue < min) {
1953                     error_report("hv-spinlocks value shall always be >= 0x%x"
1954                                  ", fixup will be removed in future versions",
1958                 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1959                 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1961             object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1964             feat2prop(featurestr);
1965             object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1968             error_propagate(errp, local_err);
1971         featurestr = strtok(NULL, ",");
1974     if (cpu->host_features) {
1975         for (w = 0; w < FEATURE_WORDS; w++) {
1977                 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1981     for (w = 0; w < FEATURE_WORDS; w++) {
1982         env->features[w] |= plus_features[w];
1983         env->features[w] &= ~minus_features[w];
1987 /* Print all cpuid feature names in featureset
/* Print the non-NULL names of a 32-entry feature-name table on one
 * line, space-separated, via the given fprintf-like callback. */
1989 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1994     for (bit = 0; bit < 32; bit++) {
1995         if (featureset[bit]) {
1996             print(f, "%s%s", first ? "" : " ", featureset[bit]);
2002 /* generate CPU information. */
2003 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2005 X86CPUDefinition *def;
2009 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2010 def = &builtin_x86_defs[i];
2011 snprintf(buf, sizeof(buf), "%s", def->name);
2012 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2015 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2016 "KVM processor with all supported host features "
2017 "(only available in KVM mode)");
2020 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2021 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2022 FeatureWordInfo *fw = &feature_word_info[i];
2024 (*cpu_fprintf)(f, " ");
2025 listflags(f, cpu_fprintf, fw->feat_names);
2026 (*cpu_fprintf)(f, "\n");
/* QMP query-cpu-definitions: build a linked list of CpuDefinitionInfo
 * entries, one per built-in model (caller owns and frees the list). */
2030 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2032     CpuDefinitionInfoList *cpu_list = NULL;
2033     X86CPUDefinition *def;
2036     for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2037         CpuDefinitionInfoList *entry;
2038         CpuDefinitionInfo *info;
2040         def = &builtin_x86_defs[i];
2041         info = g_malloc0(sizeof(*info));
2042         info->name = g_strdup(def->name);
2044         entry = g_malloc0(sizeof(*entry));
2045         entry->value = info;
2046         entry->next = cpu_list;
/* Return the feature bits the current accelerator (KVM or TCG) can
 * provide for word 'w'; optionally masked down to migration-safe bits. */
2053 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2054                                                    bool migratable_only)
2056     FeatureWordInfo *wi = &feature_word_info[w];
2059     if (kvm_enabled()) {
2060         r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2063     } else if (tcg_enabled()) {
2064         r = wi->tcg_features;
2068     if (migratable_only) {
2069         r &= x86_cpu_get_migratable_flags(w);
2075 * Filters CPU feature words based on host availability of each feature.
2077 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
/* Mask env->features down to what the host supports, recording the
 * dropped bits in filtered_features; warn per word when check/enforce
 * is enabled.  Returns 0 if everything requested was supported. */
2079 static int x86_cpu_filter_features(X86CPU *cpu)
2081     CPUX86State *env = &cpu->env;
2085     for (w = 0; w < FEATURE_WORDS; w++) {
2086         uint32_t host_feat =
2087             x86_cpu_get_supported_feature_word(w, cpu->migratable);
2088         uint32_t requested_features = env->features[w];
2089         env->features[w] &= host_feat;
2090         cpu->filtered_features[w] = requested_features & ~env->features[w];
2091         if (cpu->filtered_features[w]) {
2092             if (cpu->check_cpuid || cpu->enforce_cpuid) {
2093                 report_unavailable_features(w, cpu->filtered_features[w]);
/* Apply a NULL-terminated PropValue table to 'cpu' via
 * object_property_parse. */
2102 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2105     for (pv = props; pv->prop; pv++) {
2109         object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2114 /* Load data from X86CPUDefinition
/* Load an X86CPUDefinition into 'cpu': copy the scalar CPUID fields and
 * feature words, apply KVM-specific defaults (x2apic off without an
 * in-kernel irqchip), advertise CPUID_EXT_HYPERVISOR, and pick the
 * vendor string (host vendor under KVM unless overridden). */
2116 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2118     CPUX86State *env = &cpu->env;
2120     char host_vendor[CPUID_VENDOR_SZ + 1];
2123     object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2124     object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2125     object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2126     object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2127     object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2128     object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2129     object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2130     for (w = 0; w < FEATURE_WORDS; w++) {
2131         env->features[w] = def->features[w];
2134     /* Special cases not set in the X86CPUDefinition structs: */
2135     if (kvm_enabled()) {
2136         if (!kvm_irqchip_in_kernel()) {
2137             x86_cpu_change_kvm_default("x2apic", "off");
2140         x86_cpu_apply_props(cpu, kvm_default_props);
2143     env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2145     /* sysenter isn't supported in compatibility mode on AMD,
2146      * syscall isn't supported in compatibility mode on Intel.
2147      * Normally we advertise the actual CPU vendor, but you can
2148      * override this using the 'vendor' property if you want to use
2149      * KVM's sysenter/syscall emulation in compatibility mode and
2150      * when doing cross vendor migration
2152     vendor = def->vendor;
2153     if (kvm_enabled()) {
2154         uint32_t  ebx = 0, ecx = 0, edx = 0;
2155         host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2156         x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2157         vendor = host_vendor;
2160     object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
/* Create (but do not realize) an X86CPU from a "model,feat1,..." string.
 * Splits the model name from the feature list, resolves the CPU class,
 * rejects KVM-only classes without KVM, instantiates the object and
 * parses the features.  On error: propagates to errp, unrefs any
 * partially-built CPU, and returns NULL (cleanup path is partially
 * sampled out).  Caller owns the returned reference. */
2164 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2169     gchar **model_pieces;
2170     char *name, *features;
2171     Error *error = NULL;
2173     model_pieces = g_strsplit(cpu_model, ",", 2);
2174     if (!model_pieces[0]) {
2175         error_setg(&error, "Invalid/empty CPU model name");
2178     name = model_pieces[0];
2179     features = model_pieces[1];
2181     oc = x86_cpu_class_by_name(name);
2183         error_setg(&error, "Unable to find CPU definition: %s", name);
2186     xcc = X86_CPU_CLASS(oc);
2188     if (xcc->kvm_required && !kvm_enabled()) {
2189         error_setg(&error, "CPU model '%s' requires KVM", name);
2193     cpu = X86_CPU(object_new(object_class_get_name(oc)));
2195     x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2201     if (error != NULL) {
2202         error_propagate(errp, error);
2204             object_unref(OBJECT(cpu));
2208     g_strfreev(model_pieces);
/* Convenience wrapper: create and realize an X86CPU from a model
 * string; reports errors and unrefs the CPU on failure. */
2212 X86CPU *cpu_x86_init(const char *cpu_model)
2214     Error *error = NULL;
2217     cpu = cpu_x86_create(cpu_model, &error);
2222     object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2226         error_report_err(error);
2228         object_unref(OBJECT(cpu));
/* class_init for built-in CPU model types: stash the X86CPUDefinition
 * (passed as class_data) in the class for later loading. */
2235 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2237     X86CPUDefinition *cpudef = data;
2238     X86CPUClass *xcc = X86_CPU_CLASS(oc);
2240     xcc->cpu_def = cpudef;
/* Register one built-in CPU model as a QOM type whose class_data is the
 * definition itself (type_register and g_free are sampled-out lines). */
2243 static void x86_register_cpudef_type(X86CPUDefinition *def)
2245     char *typename = x86_cpu_type_name(def->name);
2248         .parent = TYPE_X86_CPU,
2249         .class_init = x86_cpu_cpudef_class_init,
2257 #if !defined(CONFIG_USER_ONLY)
/* Strip the APIC feature bit from CPUID leaf 1 EDX (used when the
 * machine is configured without a local APIC). */
2259 void cpu_clear_apic_feature(CPUX86State *env)
2261     env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2264 #endif /* !CONFIG_USER_ONLY */
2266 /* Initialize list of CPU models, filling some non-static fields if necessary
/* One-time setup of the built-in model table: for the generic models
 * (qemu32/qemu64/athlon) rewrite model_id to embed the QEMU version
 * string (the pstrcat argument with the version is a sampled-out line). */
2268 void x86_cpudef_setup(void)
2271     static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2273     for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2274         X86CPUDefinition *def = &builtin_x86_defs[i];
2276         /* Look for specific "cpudef" models that */
2277         /* have the QEMU version in .model_id */
2278         for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2279             if (strcmp(model_with_versions[j], def->name) == 0) {
2280                 pstrcpy(def->model_id, sizeof(def->model_id),
2281                         "QEMU Virtual CPU version ");
2282                 pstrcat(def->model_id, sizeof(def->model_id),
2290 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2291 uint32_t *eax, uint32_t *ebx,
2292 uint32_t *ecx, uint32_t *edx)
2294 X86CPU *cpu = x86_env_get_cpu(env);
2295 CPUState *cs = CPU(cpu);
2297 /* test if maximum index reached */
2298 if (index & 0x80000000) {
2299 if (index > env->cpuid_xlevel) {
2300 if (env->cpuid_xlevel2 > 0) {
2301 /* Handle the Centaur's CPUID instruction. */
2302 if (index > env->cpuid_xlevel2) {
2303 index = env->cpuid_xlevel2;
2304 } else if (index < 0xC0000000) {
2305 index = env->cpuid_xlevel;
2308 /* Intel documentation states that invalid EAX input will
2309 * return the same information as EAX=cpuid_level
2310 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2312 index = env->cpuid_level;
2316 if (index > env->cpuid_level)
2317 index = env->cpuid_level;
2322 *eax = env->cpuid_level;
2323 *ebx = env->cpuid_vendor1;
2324 *edx = env->cpuid_vendor2;
2325 *ecx = env->cpuid_vendor3;
2328 *eax = env->cpuid_version;
2329 *ebx = (cpu->apic_id << 24) |
2330 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2331 *ecx = env->features[FEAT_1_ECX];
2332 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2333 *ecx |= CPUID_EXT_OSXSAVE;
2335 *edx = env->features[FEAT_1_EDX];
2336 if (cs->nr_cores * cs->nr_threads > 1) {
2337 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2342 /* cache info: needed for Pentium Pro compatibility */
2343 if (cpu->cache_info_passthrough) {
2344 host_cpuid(index, 0, eax, ebx, ecx, edx);
2347 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2350 *edx = (L1D_DESCRIPTOR << 16) | \
2351 (L1I_DESCRIPTOR << 8) | \
2355 /* cache info: needed for Core compatibility */
2356 if (cpu->cache_info_passthrough) {
2357 host_cpuid(index, count, eax, ebx, ecx, edx);
2358 *eax &= ~0xFC000000;
2362 case 0: /* L1 dcache info */
2363 *eax |= CPUID_4_TYPE_DCACHE | \
2364 CPUID_4_LEVEL(1) | \
2365 CPUID_4_SELF_INIT_LEVEL;
2366 *ebx = (L1D_LINE_SIZE - 1) | \
2367 ((L1D_PARTITIONS - 1) << 12) | \
2368 ((L1D_ASSOCIATIVITY - 1) << 22);
2369 *ecx = L1D_SETS - 1;
2370 *edx = CPUID_4_NO_INVD_SHARING;
2372 case 1: /* L1 icache info */
2373 *eax |= CPUID_4_TYPE_ICACHE | \
2374 CPUID_4_LEVEL(1) | \
2375 CPUID_4_SELF_INIT_LEVEL;
2376 *ebx = (L1I_LINE_SIZE - 1) | \
2377 ((L1I_PARTITIONS - 1) << 12) | \
2378 ((L1I_ASSOCIATIVITY - 1) << 22);
2379 *ecx = L1I_SETS - 1;
2380 *edx = CPUID_4_NO_INVD_SHARING;
2382 case 2: /* L2 cache info */
2383 *eax |= CPUID_4_TYPE_UNIFIED | \
2384 CPUID_4_LEVEL(2) | \
2385 CPUID_4_SELF_INIT_LEVEL;
2386 if (cs->nr_threads > 1) {
2387 *eax |= (cs->nr_threads - 1) << 14;
2389 *ebx = (L2_LINE_SIZE - 1) | \
2390 ((L2_PARTITIONS - 1) << 12) | \
2391 ((L2_ASSOCIATIVITY - 1) << 22);
2393 *edx = CPUID_4_NO_INVD_SHARING;
2395 default: /* end of info */
2404 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2405 if ((*eax & 31) && cs->nr_cores > 1) {
2406 *eax |= (cs->nr_cores - 1) << 26;
2410 /* mwait info: needed for Core compatibility */
2411 *eax = 0; /* Smallest monitor-line size in bytes */
2412 *ebx = 0; /* Largest monitor-line size in bytes */
2413 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2417 /* Thermal and Power Leaf */
2418 *eax = env->features[FEAT_6_EAX];
2424 /* Structured Extended Feature Flags Enumeration Leaf */
2426 *eax = 0; /* Maximum ECX value for sub-leaves */
2427 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2428 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2429 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2430 *ecx |= CPUID_7_0_ECX_OSPKE;
2432 *edx = 0; /* Reserved */
2441 /* Direct Cache Access Information Leaf */
2442 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2448 /* Architectural Performance Monitoring Leaf */
2449 if (kvm_enabled() && cpu->enable_pmu) {
2450 KVMState *s = cs->kvm_state;
2452 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2453 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2454 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2455 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2464 KVMState *s = cs->kvm_state;
2468 /* Processor Extended State */
2473 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2476 if (kvm_enabled()) {
2477 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2479 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2486 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2487 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2488 if ((env->features[esa->feature] & esa->bits) == esa->bits
2489 && ((ena_mask >> i) & 1) != 0) {
2493 *edx |= 1u << (i - 32);
2495 *ecx = MAX(*ecx, esa->offset + esa->size);
2498 *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2500 } else if (count == 1) {
2501 *eax = env->features[FEAT_XSAVE];
2502 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2503 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2504 if ((env->features[esa->feature] & esa->bits) == esa->bits
2505 && ((ena_mask >> count) & 1) != 0) {
2513 *eax = env->cpuid_xlevel;
2514 *ebx = env->cpuid_vendor1;
2515 *edx = env->cpuid_vendor2;
2516 *ecx = env->cpuid_vendor3;
2519 *eax = env->cpuid_version;
2521 *ecx = env->features[FEAT_8000_0001_ECX];
2522 *edx = env->features[FEAT_8000_0001_EDX];
2524 /* The Linux kernel checks for the CMPLegacy bit and
2525 * discards multiple thread information if it is set.
2526 * So dont set it here for Intel to make Linux guests happy.
2528 if (cs->nr_cores * cs->nr_threads > 1) {
2529 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2530 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2531 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2532 *ecx |= 1 << 1; /* CmpLegacy bit */
2539 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2540 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2541 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2542 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2545 /* cache info (L1 cache) */
2546 if (cpu->cache_info_passthrough) {
2547 host_cpuid(index, 0, eax, ebx, ecx, edx);
2550 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2551 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2552 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2553 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2554 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2555 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2556 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2557 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2560 /* cache info (L2 cache) */
2561 if (cpu->cache_info_passthrough) {
2562 host_cpuid(index, 0, eax, ebx, ecx, edx);
2565 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2566 (L2_DTLB_2M_ENTRIES << 16) | \
2567 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2568 (L2_ITLB_2M_ENTRIES);
2569 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2570 (L2_DTLB_4K_ENTRIES << 16) | \
2571 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2572 (L2_ITLB_4K_ENTRIES);
2573 *ecx = (L2_SIZE_KB_AMD << 16) | \
2574 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2575 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2576 *edx = ((L3_SIZE_KB/512) << 18) | \
2577 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2578 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2584 *edx = env->features[FEAT_8000_0007_EDX];
2587 /* virtual & phys address size in low 2 bytes. */
2588 /* XXX: This value must match the one used in the MMU code. */
2589 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2590 /* 64 bit processor */
2591 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2592 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2594 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2595 *eax = 0x00000024; /* 36 bits physical */
2597 *eax = 0x00000020; /* 32 bits physical */
2603 if (cs->nr_cores * cs->nr_threads > 1) {
2604 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2608 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2609 *eax = 0x00000001; /* SVM Revision */
2610 *ebx = 0x00000010; /* nr of ASIDs */
2612 *edx = env->features[FEAT_SVM]; /* optional features */
2621 *eax = env->cpuid_xlevel2;
2627 /* Support for VIA CPU's CPUID instruction */
2628 *eax = env->cpuid_version;
2631 *edx = env->features[FEAT_C000_0001_EDX];
2636 /* Reserved for the future, and now filled with zero */
2643 /* reserved values: zero */
2652 /* CPUClass::reset() */
2653 static void x86_cpu_reset(CPUState *s)
/* Reset the vCPU to its architectural power-on state: zero the non-CPUID
 * part of CPUX86State, reload segment/descriptor caches and FPU/SSE
 * control words, restore MSR/debug-register defaults, recompute XCR0 and
 * CR4 feature enables, clear MTRRs, then (softmmu only) re-designate the
 * BSP and let KVM reset the in-kernel vcpu.
 * NOTE(review): gaps in the embedded numbering show some original
 * statements are elided from this view; comments cover visible code only. */
2655 X86CPU *cpu = X86_CPU(s);
2656 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2657 CPUX86State *env = &cpu->env;
2662 xcc->parent_reset(s);
/* CPUID fields live past cpuid_level and must survive reset; only the
 * state before that offset is cleared. */
2664 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2668 env->old_exception = -1;
2670 /* init to reset state */
2672 #ifdef CONFIG_SOFTMMU
2673 env->hflags |= HF_SOFTMMU_MASK;
2675 env->hflags2 |= HF2_GIF_MASK;
/* CR0 reset value 0x60000010: ET/NW/CD set, paging and protection off. */
2677 cpu_x86_update_cr0(env, 0x60000010);
2678 env->a20_mask = ~0x0;
2679 env->smbase = 0x30000;
2681 env->idt.limit = 0xffff;
2682 env->gdt.limit = 0xffff;
2683 env->ldt.limit = 0xffff;
2684 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2685 env->tr.limit = 0xffff;
2686 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
/* CS points at the architectural reset vector base 0xffff0000. */
2688 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2689 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2690 DESC_R_MASK | DESC_A_MASK);
2691 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2692 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2694 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2695 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2697 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2698 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2700 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2701 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2703 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2704 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
/* EDX holds the CPU signature after reset. */
2708 env->regs[R_EDX] = env->cpuid_version;
2713 for (i = 0; i < 8; i++) {
2716 cpu_set_fpuc(env, 0x37f);
2718 env->mxcsr = 0x1f80;
2719 /* All units are in INIT state. */
2722 env->pat = 0x0007040600070406ULL;
2723 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2725 memset(env->dr, 0, sizeof(env->dr));
2726 env->dr[6] = DR6_FIXED_1;
2727 env->dr[7] = DR7_FIXED_1;
2728 cpu_breakpoint_remove_all(s, BP_CPU);
2729 cpu_watchpoint_remove_all(s, BP_CPU);
/* x87 state is always enabled in XCR0. */
2732 xcr0 = XSTATE_FP_MASK;
2734 #ifdef CONFIG_USER_ONLY
2735 /* Enable all the features for user-mode. */
2736 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2737 xcr0 |= XSTATE_SSE_MASK;
2739 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2740 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2741 if ((env->features[esa->feature] & esa->bits) == esa->bits) {
2746 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2747 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2749 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2750 cr4 |= CR4_FSGSBASE_MASK;
2755 cpu_x86_update_cr4(env, cr4);
2758 * SDM 11.11.5 requires:
2759 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2760 * - IA32_MTRR_PHYSMASKn.V = 0
2761 * All other bits are undefined. For simplification, zero it all.
2763 env->mtrr_deftype = 0;
2764 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2765 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2767 #if !defined(CONFIG_USER_ONLY)
2768 /* We hard-wire the BSP to the first CPU. */
2769 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
/* Application processors stay halted until they receive INIT/SIPI. */
2771 s->halted = !cpu_is_bsp(cpu);
2773 if (kvm_enabled()) {
2774 kvm_arch_reset_vcpu(cpu);
2779 #ifndef CONFIG_USER_ONLY
2780 bool cpu_is_bsp(X86CPU *cpu)
/* True when this vCPU is the bootstrap processor, as reported by the BSP
 * bit of its APIC base MSR (set at reset via apic_designate_bsp()). */
2782 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2785 /* TODO: remove me, when reset over QOM tree is implemented */
2786 static void x86_cpu_machine_reset_cb(void *opaque)
/* qemu_register_reset() callback: forwards a machine-wide reset to this
 * CPU's QOM reset handler. opaque is the X86CPU passed at registration. */
2788 X86CPU *cpu = opaque;
2789 cpu_reset(CPU(cpu));
2793 static void mce_init(X86CPU *cpu)
/* Enable the machine-check architecture for family >= 6 CPUs that
 * advertise both MCE and MCA in CPUID leaf 1 EDX: install the default
 * MCG capability/control values and set every default bank's control
 * register (mce_banks[bank * 4]) to all-ones. */
2795 CPUX86State *cenv = &cpu->env;
2798 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2799 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2800 (CPUID_MCE | CPUID_MCA)) {
2801 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2802 cenv->mcg_ctl = ~(uint64_t)0;
2803 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2804 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2809 #ifndef CONFIG_USER_ONLY
2810 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
/* Create (but do not realize) the local APIC device for this CPU,
 * picking the implementation matching the accelerator: in-kernel KVM
 * APIC, Xen APIC, or the userspace "apic" model. The device becomes
 * QOM child "apic" of the CPU and receives this CPU's APIC ID. */
2812 APICCommonState *apic;
2813 const char *apic_type = "apic";
2815 if (kvm_apic_in_kernel()) {
2816 apic_type = "kvm-apic";
2817 } else if (xen_enabled()) {
2818 apic_type = "xen-apic";
2821 cpu->apic_state = DEVICE(object_new(apic_type));
2823 object_property_add_child(OBJECT(cpu), "apic",
2824 OBJECT(cpu->apic_state), NULL);
2825 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2826 /* TODO: convert to link<> */
2827 apic = APIC_COMMON(cpu->apic_state);
/* Reset state: APIC globally enabled at the architectural default base. */
2829 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2832 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
/* Realize the APIC created by x86_cpu_apic_create() and, once per
 * machine, map the APIC MMIO window into system memory. A NULL
 * apic_state (no APIC configured) makes this a no-op. */
2834 APICCommonState *apic;
2835 static bool apic_mmio_map_once;
2837 if (cpu->apic_state == NULL) {
2840 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2843 /* Map APIC MMIO area */
2844 apic = APIC_COMMON(cpu->apic_state);
/* The MMIO window is shared by all CPUs, so map it exactly once. */
2845 if (!apic_mmio_map_once) {
2846 memory_region_add_subregion_overlap(get_system_memory(),
2848 MSR_IA32_APICBASE_BASE,
2851 apic_mmio_map_once = true;
2855 static void x86_cpu_machine_done(Notifier *n, void *unused)
/* machine-init-done notifier: if the machine exposes /machine/smram,
 * alias its first 4GB into this CPU's private address-space root with
 * priority 1 (above normal RAM), initially disabled until SMM code
 * enables it. */
2857 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2858 MemoryRegion *smram =
2859 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2862 cpu->smram = g_new(MemoryRegion, 1);
2863 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2864 smram, 0, 1ull << 32);
2865 memory_region_set_enabled(cpu->smram, false);
2866 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
/* CONFIG_USER_ONLY counterpart: no APIC device exists in user-mode
 * emulation, so realizing it does nothing (body elided from this view). */
2870 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
/* Vendor tests comparing the three cached CPUID vendor-string registers
 * (leaf 0 EBX/EDX/ECX) against the Intel/AMD signatures. */
2876 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2877 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2878 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2879 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2880 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2881 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2882 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
/* DeviceClass::realize for X86CPU: validate configuration (apic-id set,
 * cpuid_level high enough for requested leaf-7 features, host/TCG able
 * to supply the features when "enforce" is on), apply AMD CPUID feature
 * aliasing, create the APIC and (for TCG softmmu) a per-CPU address
 * space that can overlay SMRAM, realize the APIC, then chain to the
 * parent realize. Errors accumulate in local_err and propagate to errp.
 * NOTE(review): embedded-numbering gaps show some statements (error
 * returns, vcpu init) are elided from this view. */
2884 CPUState *cs = CPU(dev);
2885 X86CPU *cpu = X86_CPU(dev);
2886 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2887 CPUX86State *env = &cpu->env;
2888 Error *local_err = NULL;
2889 static bool ht_warned;
2891 if (cpu->apic_id < 0) {
2892 error_setg(errp, "apic-id property was not initialized properly");
/* CPUID[7] features are invisible to guests unless cpuid_level >= 7. */
2896 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2897 env->cpuid_level = 7;
2900 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2901 error_setg(&local_err,
2903 "Host doesn't support requested features" :
2904 "TCG doesn't support requested features");
2908 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2911 if (IS_AMD_CPU(env)) {
2912 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2913 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2914 & CPUID_EXT2_AMD_ALIASES);
2918 #ifndef CONFIG_USER_ONLY
2919 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
/* An APIC is needed when the CPU advertises one or when SMP requires
 * inter-processor interrupts. */
2921 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2922 x86_cpu_apic_create(cpu, &local_err);
2923 if (local_err != NULL) {
2931 #ifndef CONFIG_USER_ONLY
/* TCG needs a private address space per CPU so SMRAM can be layered
 * above normal system memory with higher priority. */
2932 if (tcg_enabled()) {
2933 AddressSpace *newas = g_new(AddressSpace, 1);
2935 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2936 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2938 /* Outer container... */
2939 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2940 memory_region_set_enabled(cpu->cpu_as_root, true);
2942 /* ... with two regions inside: normal system memory with low
2945 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2946 get_system_memory(), 0, ~0ull);
2947 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2948 memory_region_set_enabled(cpu->cpu_as_mem, true);
2949 address_space_init(newas, cpu->cpu_as_root, "CPU");
2951 cpu_address_space_init(cs, newas, 0);
2953 /* ... SMRAM with higher priority, linked from /machine/smram. */
2954 cpu->machine_done.notify = x86_cpu_machine_done;
2955 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2961 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2962 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2963 * based on inputs (sockets,cores,threads), it is still better to gives
2966 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2967 * cs->nr_threads hasn't be populated yet and the checking is incorrect.
2969 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2970 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2971 " -smp options properly.");
2975 x86_cpu_apic_realize(cpu, &local_err);
2976 if (local_err != NULL) {
2981 xcc->parent_realize(dev, &local_err);
2984 if (local_err != NULL) {
2985 error_propagate(errp, local_err);
/* Closure for a per-feature-bit boolean QOM property: based on usage in
 * the getter/setter below, ptr points at the 32-bit feature word and
 * mask selects the controlled bit(s). Members elided from this view. */
2990 typedef struct BitProperty {
2995 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
2996 void *opaque, Error **errp)
/* QOM getter for a feature-bit property: reports true only when ALL bits
 * of fp->mask are set in the target feature word. */
2998 BitProperty *fp = opaque;
2999 bool value = (*fp->ptr & fp->mask) == fp->mask;
3000 visit_type_bool(v, name, &value, errp);
3003 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3004 void *opaque, Error **errp)
/* QOM setter for a feature-bit property: sets or clears fp->mask in the
 * target feature word. Refused once the device is realized, since CPUID
 * must be stable after realize. */
3006 DeviceState *dev = DEVICE(obj);
3007 BitProperty *fp = opaque;
3008 Error *local_err = NULL;
3011 if (dev->realized) {
3012 qdev_prop_set_after_realize(dev, name, errp);
3016 visit_type_bool(v, name, &value, &local_err);
3018 error_propagate(errp, local_err);
3023 *fp->ptr |= fp->mask;
3025 *fp->ptr &= ~fp->mask;
/* QOM release hook for feature-bit properties; receives the BitProperty
 * closure (remainder of signature and body elided from this view). */
3029 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3032 BitProperty *prop = opaque;
3036 /* Register a boolean property to get/set a single bit in a uint32_t field.
3038 * The same property name can be registered multiple times to make it affect
3039 * multiple bits in the same FeatureWord. In that case, the getter will return
3040 * true only if all bits are set.
3042 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3043 const char *prop_name,
/* Register a boolean QOM property controlling bit 'bitnr' of the given
 * feature word. If a property of that name already exists it must target
 * the same word (asserted), matching the multi-bit semantics described
 * in the comment above this function. */
3049 uint32_t mask = (1UL << bitnr);
3051 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3054 assert(fp->ptr == field);
3057 fp = g_new0(BitProperty, 1);
3060 object_property_add(OBJECT(cpu), prop_name, "bool",
3061 x86_cpu_get_bit_prop,
3062 x86_cpu_set_bit_prop,
3063 x86_cpu_release_bit_prop, fp, &error_abort);
3067 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
/* Register the property name(s) for bit 'bitnr' of feature word 'w'.
 * Words or bits with no name in feature_word_info are skipped; a
 * "|"-separated name list yields one canonical property plus aliases. */
3071 Object *obj = OBJECT(cpu);
3074 FeatureWordInfo *fi = &feature_word_info[w];
3076 if (!fi->feat_names) {
3079 if (!fi->feat_names[bitnr]) {
3083 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
/* The first name is canonical: normalize it and bind it to the bit. */
3085 feat2prop(names[0]);
3086 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
/* Remaining names become aliases of the canonical property. */
3088 for (i = 1; names[i]; i++) {
3089 feat2prop(names[i]);
3090 object_property_add_alias(obj, names[i], obj, names[0],
3097 static void x86_cpu_initfn(Object *obj)
/* QOM instance_init: register per-CPU properties (family/model/stepping,
 * vendor, model-id, tsc-frequency, apic-id, feature-word introspection),
 * expose every feature bit as a boolean property, then load the class's
 * CPU model definition defaults. Runs before any user-set properties. */
3099 CPUState *cs = CPU(obj);
3100 X86CPU *cpu = X86_CPU(obj);
3101 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3102 CPUX86State *env = &cpu->env;
3107 cpu_exec_init(cs, &error_abort);
3109 object_property_add(obj, "family", "int",
3110 x86_cpuid_version_get_family,
3111 x86_cpuid_version_set_family, NULL, NULL, NULL);
3112 object_property_add(obj, "model", "int",
3113 x86_cpuid_version_get_model,
3114 x86_cpuid_version_set_model, NULL, NULL, NULL);
3115 object_property_add(obj, "stepping", "int",
3116 x86_cpuid_version_get_stepping,
3117 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3118 object_property_add_str(obj, "vendor",
3119 x86_cpuid_get_vendor,
3120 x86_cpuid_set_vendor, NULL);
3121 object_property_add_str(obj, "model-id",
3122 x86_cpuid_get_model_id,
3123 x86_cpuid_set_model_id, NULL);
3124 object_property_add(obj, "tsc-frequency", "int",
3125 x86_cpuid_get_tsc_freq,
3126 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3127 object_property_add(obj, "apic-id", "int",
3128 x86_cpuid_get_apic_id,
3129 x86_cpuid_set_apic_id, NULL, NULL, NULL);
/* Read-only views over env->features / filtered_features for QMP. */
3130 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3131 x86_cpu_get_feature_words,
3132 NULL, NULL, (void *)env->features, NULL);
3133 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3134 x86_cpu_get_feature_words,
3135 NULL, NULL, (void *)cpu->filtered_features, NULL);
3137 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3139 #ifndef CONFIG_USER_ONLY
3140 /* Any code creating new X86CPU objects have to set apic-id explicitly */
/* Expose every known feature bit as a settable boolean property. */
3144 for (w = 0; w < FEATURE_WORDS; w++) {
3147 for (bitnr = 0; bitnr < 32; bitnr++) {
3148 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3152 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3154 /* init various static tables used in TCG mode */
3155 if (tcg_enabled() && !inited) {
3161 static int64_t x86_cpu_get_arch_id(CPUState *cs)
/* CPUClass::get_arch_id: the architectural CPU identifier is the APIC ID. */
3163 X86CPU *cpu = X86_CPU(cs);
3165 return cpu->apic_id;
3168 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
/* CPUClass::get_paging_enabled: paging is on iff CR0.PG is set. */
3170 X86CPU *cpu = X86_CPU(cs);
3172 return cpu->env.cr[0] & CR0_PG_MASK;
3175 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
/* CPUClass::set_pc: the generic program counter maps directly onto EIP. */
3177 X86CPU *cpu = X86_CPU(cs);
3179 cpu->env.eip = value;
3182 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
/* Restore EIP from a translation block: tb->pc includes the CS base, so
 * subtract cs_base to recover the CS-relative instruction pointer. */
3184 X86CPU *cpu = X86_CPU(cs);
3186 cpu->env.eip = tb->pc - tb->cs_base;
3189 static bool x86_cpu_has_work(CPUState *cs)
/* CPUClass::has_work: the vCPU is runnable when (a) a maskable interrupt
 * (HARD/POLL) is pending and EFLAGS.IF is set, (b) an NMI, INIT, SIPI or
 * MCE is pending (these are not masked by IF), or (c) an SMI is pending
 * and the CPU is not already in SMM. */
3191 X86CPU *cpu = X86_CPU(cs);
3192 CPUX86State *env = &cpu->env;
3194 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3195 CPU_INTERRUPT_POLL)) &&
3196 (env->eflags & IF_MASK)) ||
3197 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3198 CPU_INTERRUPT_INIT |
3199 CPU_INTERRUPT_SIPI |
3200 CPU_INTERRUPT_MCE)) ||
3201 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3202 !(env->hflags & HF_SMM_MASK));
/* qdev properties of X86CPU: PMU exposure, Hyper-V enlightenments
 * (hv-*), CPUID check/enforce policy, KVM paravirt CPUID exposure, and
 * overrides for the maximum basic/extended CPUID levels. */
3205 static Property x86_cpu_properties[] = {
3206 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3207 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3208 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3209 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3210 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3211 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3212 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3213 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3214 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3215 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3216 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3217 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3218 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3219 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3220 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3221 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3222 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3223 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3224 DEFINE_PROP_END_OF_LIST()
3227 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
/* class_init for the abstract x86 CPU type: hook realize/reset into the
 * device/CPU class chains (saving the parent handlers so they can be
 * invoked from the overrides) and fill in all CPUClass callbacks:
 * interrupts, state dump, gdb register access, memory mapping / ELF
 * note writing (softmmu) and migration vmstate. */
3229 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3230 CPUClass *cc = CPU_CLASS(oc);
3231 DeviceClass *dc = DEVICE_CLASS(oc);
/* Save parents before overriding so x86 handlers can chain to them. */
3233 xcc->parent_realize = dc->realize;
3234 dc->realize = x86_cpu_realizefn;
3235 dc->props = x86_cpu_properties;
3237 xcc->parent_reset = cc->reset;
3238 cc->reset = x86_cpu_reset;
3239 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3241 cc->class_by_name = x86_cpu_class_by_name;
3242 cc->parse_features = x86_cpu_parse_featurestr;
3243 cc->has_work = x86_cpu_has_work;
3244 cc->do_interrupt = x86_cpu_do_interrupt;
3245 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3246 cc->dump_state = x86_cpu_dump_state;
3247 cc->set_pc = x86_cpu_set_pc;
3248 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3249 cc->gdb_read_register = x86_cpu_gdb_read_register;
3250 cc->gdb_write_register = x86_cpu_gdb_write_register;
3251 cc->get_arch_id = x86_cpu_get_arch_id;
3252 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3253 #ifdef CONFIG_USER_ONLY
3254 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3256 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3257 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3258 cc->write_elf64_note = x86_cpu_write_elf64_note;
3259 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3260 cc->write_elf32_note = x86_cpu_write_elf32_note;
3261 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3262 cc->vmsd = &vmstate_x86_cpu;
3264 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3265 #ifndef CONFIG_USER_ONLY
3266 cc->debug_excp_handler = breakpoint_handler;
3268 cc->cpu_exec_enter = x86_cpu_exec_enter;
3269 cc->cpu_exec_exit = x86_cpu_exec_exit;
3272 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3273 * object in cpus -> dangling pointer after final object_unref().
3275 dc->cannot_destroy_with_object_finalize_yet = true;
/* Abstract base QOM type from which every concrete x86 CPU model type
 * is derived. */
3278 static const TypeInfo x86_cpu_type_info = {
3279 .name = TYPE_X86_CPU,
3281 .instance_size = sizeof(X86CPU),
3282 .instance_init = x86_cpu_initfn,
3284 .class_size = sizeof(X86CPUClass),
3285 .class_init = x86_cpu_common_class_init,
3288 static void x86_cpu_register_types(void)
/* type_init hook: register the abstract base type, one subtype per
 * built-in CPU model definition, and the "host" CPU type. */
3292 type_register_static(&x86_cpu_type_info);
3293 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3294 x86_register_cpudef_type(&builtin_x86_defs[i]);
3297 type_register_static(&host_x86_cpu_type_info);
3301 type_init(x86_cpu_register_types)