2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #ifndef CONFIG_USER_ONLY
45 #include "exec/address-spaces.h"
47 #include "hw/xen/xen.h"
48 #include "hw/i386/apic_internal.h"
52 /* Cache topology CPUID constants: */
54 /* CPUID Leaf 2 Descriptors */
56 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
57 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
58 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
61 /* CPUID Leaf 4 constants: */
64 #define CPUID_4_TYPE_DCACHE 1
65 #define CPUID_4_TYPE_ICACHE 2
66 #define CPUID_4_TYPE_UNIFIED 3
68 #define CPUID_4_LEVEL(l) ((l) << 5)
70 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
71 #define CPUID_4_FULLY_ASSOC (1 << 9)
74 #define CPUID_4_NO_INVD_SHARING (1 << 0)
75 #define CPUID_4_INCLUSIVE (1 << 1)
76 #define CPUID_4_COMPLEX_IDX (1 << 2)
78 #define ASSOC_FULL 0xFF
80 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
81 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
91 a == ASSOC_FULL ? 0xF : \
92 0 /* invalid value */)
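/*
 * Illustrative sketch (not in the original source): how the encoding above
 * is consumed when building the CPUID 0x80000006 register values.  The
 * helper below is hypothetical; the real code inlines AMD_ENC_ASSOC()
 * directly into the *ecx/*edx assignments elsewhere in this file.
 */
#if 0
static uint32_t encode_l2_cache_ecx(uint32_t size_kb, uint32_t assoc,
                                    uint32_t lines_per_tag, uint32_t line_size)
{
    /* CPUID 0x80000006.ECX layout: size[31:16], assoc[15:12],
     * lines per tag[11:8], line size[7:0] */
    return (size_kb << 16) | (AMD_ENC_ASSOC(assoc) << 12) |
           (lines_per_tag << 8) | line_size;
}
/* encode_l2_cache_ecx(L2_SIZE_KB_AMD, L2_ASSOCIATIVITY, L2_LINES_PER_TAG,
 * L2_LINE_SIZE) would give 0x02008140: 512 KiB, 16-way (encoded as 0x8),
 * one line per tag, 64-byte lines. */
#endif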
95 /* Definitions of the hardcoded cache entries we expose: */
98 #define L1D_LINE_SIZE 64
99 #define L1D_ASSOCIATIVITY 8
101 #define L1D_PARTITIONS 1
102 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
103 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
104 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
105 #define L1D_LINES_PER_TAG 1
106 #define L1D_SIZE_KB_AMD 64
107 #define L1D_ASSOCIATIVITY_AMD 2
109 /* L1 instruction cache: */
110 #define L1I_LINE_SIZE 64
111 #define L1I_ASSOCIATIVITY 8
113 #define L1I_PARTITIONS 1
114 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
115 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
116 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
117 #define L1I_LINES_PER_TAG 1
118 #define L1I_SIZE_KB_AMD 64
119 #define L1I_ASSOCIATIVITY_AMD 2
121 /* Level 2 unified cache: */
122 #define L2_LINE_SIZE 64
123 #define L2_ASSOCIATIVITY 16
125 #define L2_PARTITIONS 1
126 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
127 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
128 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
129 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
130 #define L2_LINES_PER_TAG 1
131 #define L2_SIZE_KB_AMD 512
134 #define L3_SIZE_KB 0 /* disabled */
135 #define L3_ASSOCIATIVITY 0 /* disabled */
136 #define L3_LINES_PER_TAG 0 /* disabled */
137 #define L3_LINE_SIZE 0 /* disabled */
139 /* TLB definitions: */
141 #define L1_DTLB_2M_ASSOC 1
142 #define L1_DTLB_2M_ENTRIES 255
143 #define L1_DTLB_4K_ASSOC 1
144 #define L1_DTLB_4K_ENTRIES 255
146 #define L1_ITLB_2M_ASSOC 1
147 #define L1_ITLB_2M_ENTRIES 255
148 #define L1_ITLB_4K_ASSOC 1
149 #define L1_ITLB_4K_ENTRIES 255
151 #define L2_DTLB_2M_ASSOC 0 /* disabled */
152 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
153 #define L2_DTLB_4K_ASSOC 4
154 #define L2_DTLB_4K_ENTRIES 512
156 #define L2_ITLB_2M_ASSOC 0 /* disabled */
157 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
158 #define L2_ITLB_4K_ASSOC 4
159 #define L2_ITLB_4K_ENTRIES 512
163 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
164 uint32_t vendor2, uint32_t vendor3)
167 for (i = 0; i < 4; i++) {
168 dst[i] = vendor1 >> (8 * i);
169 dst[i + 4] = vendor2 >> (8 * i);
170 dst[i + 8] = vendor3 >> (8 * i);
172 dst[CPUID_VENDOR_SZ] = '\0';
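/*
 * Illustrative sketch (not in the original source): the vendor string is
 * stored as three little-endian 32-bit words, exactly as CPUID.0 returns
 * it in EBX, EDX and ECX.
 */
#if 0
static void example_intel_vendor(void)
{
    char vendor[CPUID_VENDOR_SZ + 1];

    /* EBX = "Genu", EDX = "ineI", ECX = "ntel" on an Intel part */
    x86_cpu_vendor_words2str(vendor, 0x756e6547, 0x49656e69, 0x6c65746e);
    /* vendor now holds "GenuineIntel" */
}
#endif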
175 /* feature flags taken from "Intel Processor Identification and the CPUID
176 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
177 * between feature naming conventions, aliases may be added.
179 static const char *feature_name[] = {
180 "fpu", "vme", "de", "pse",
181 "tsc", "msr", "pae", "mce",
182 "cx8", "apic", NULL, "sep",
183 "mtrr", "pge", "mca", "cmov",
184 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
185 NULL, "ds" /* Intel dts */, "acpi", "mmx",
186 "fxsr", "sse", "sse2", "ss",
187 "ht" /* Intel htt */, "tm", "ia64", "pbe",
189 static const char *ext_feature_name[] = {
190 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
191 "ds_cpl", "vmx", "smx", "est",
192 "tm2", "ssse3", "cid", NULL,
193 "fma", "cx16", "xtpr", "pdcm",
194 NULL, "pcid", "dca", "sse4.1|sse4_1",
195 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
196 "tsc-deadline", "aes", "xsave", "osxsave",
197 "avx", "f16c", "rdrand", "hypervisor",
199 /* Feature names that are already defined in feature_name[] but are set in
200 * CPUID[8000_0001].EDX on AMD CPUs don't have their names listed in
201 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
202 * if and only if the CPU vendor is AMD.
204 static const char *ext2_feature_name[] = {
205 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
206 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
207 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
208 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
209 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
210 "nx|xd", NULL, "mmxext", NULL /* mmx */,
211 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
212 NULL, "lm|i64", "3dnowext", "3dnow",
214 static const char *ext3_feature_name[] = {
215 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
216 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
217 "3dnowprefetch", "osvw", "ibs", "xop",
218 "skinit", "wdt", NULL, "lwp",
219 "fma4", "tce", NULL, "nodeid_msr",
220 NULL, "tbm", "topoext", "perfctr_core",
221 "perfctr_nb", NULL, NULL, NULL,
222 NULL, NULL, NULL, NULL,
225 static const char *ext4_feature_name[] = {
226 NULL, NULL, "xstore", "xstore-en",
227 NULL, NULL, "xcrypt", "xcrypt-en",
228 "ace2", "ace2-en", "phe", "phe-en",
229 "pmm", "pmm-en", NULL, NULL,
230 NULL, NULL, NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
236 static const char *kvm_feature_name[] = {
237 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
238 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
239 NULL, NULL, NULL, NULL,
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 "kvmclock-stable-bit", NULL, NULL, NULL,
244 NULL, NULL, NULL, NULL,
247 static const char *svm_feature_name[] = {
248 "npt", "lbrv", "svm_lock", "nrip_save",
249 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
250 NULL, NULL, "pause_filter", NULL,
251 "pfthreshold", NULL, NULL, NULL,
252 NULL, NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
258 static const char *cpuid_7_0_ebx_feature_name[] = {
259 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
260 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
261 "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
262 "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
265 static const char *cpuid_7_0_ecx_feature_name[] = {
266 NULL, NULL, NULL, "pku",
267 "ospke", NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
276 static const char *cpuid_apm_edx_feature_name[] = {
277 NULL, NULL, NULL, NULL,
278 NULL, NULL, NULL, NULL,
279 "invtsc", NULL, NULL, NULL,
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
287 static const char *cpuid_xsave_feature_name[] = {
288 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
289 NULL, NULL, NULL, NULL,
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
298 static const char *cpuid_6_feature_name[] = {
299 NULL, NULL, "arat", NULL,
300 NULL, NULL, NULL, NULL,
301 NULL, NULL, NULL, NULL,
302 NULL, NULL, NULL, NULL,
303 NULL, NULL, NULL, NULL,
304 NULL, NULL, NULL, NULL,
305 NULL, NULL, NULL, NULL,
306 NULL, NULL, NULL, NULL,
309 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
310 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
311 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
312 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
313 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
314 CPUID_PSE36 | CPUID_FXSR)
315 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
316 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
317 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
318 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
319 CPUID_PAE | CPUID_SEP | CPUID_APIC)
321 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
322 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
323 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
324 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
325 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
326 /* partly implemented:
327 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
329 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
330 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
331 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
332 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
333 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
334 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
336 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
337 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
338 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
339 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
340 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
343 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
345 #define TCG_EXT2_X86_64_FEATURES 0
348 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
349 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
350 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
351 TCG_EXT2_X86_64_FEATURES)
352 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
353 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
354 #define TCG_EXT4_FEATURES 0
355 #define TCG_SVM_FEATURES 0
356 #define TCG_KVM_FEATURES 0
357 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
358 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
359 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
360 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
362 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
363 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
364 CPUID_7_0_EBX_RDSEED */
365 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
366 #define TCG_APM_FEATURES 0
367 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
368 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
370 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
372 typedef struct FeatureWordInfo {
373 const char **feat_names;
374 uint32_t cpuid_eax; /* Input EAX for CPUID */
375 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
376 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
377 int cpuid_reg; /* output register (R_* constant) */
378 uint32_t tcg_features; /* Feature flags supported by TCG */
379 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
382 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
384 .feat_names = feature_name,
385 .cpuid_eax = 1, .cpuid_reg = R_EDX,
386 .tcg_features = TCG_FEATURES,
389 .feat_names = ext_feature_name,
390 .cpuid_eax = 1, .cpuid_reg = R_ECX,
391 .tcg_features = TCG_EXT_FEATURES,
393 [FEAT_8000_0001_EDX] = {
394 .feat_names = ext2_feature_name,
395 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
396 .tcg_features = TCG_EXT2_FEATURES,
398 [FEAT_8000_0001_ECX] = {
399 .feat_names = ext3_feature_name,
400 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
401 .tcg_features = TCG_EXT3_FEATURES,
403 [FEAT_C000_0001_EDX] = {
404 .feat_names = ext4_feature_name,
405 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
406 .tcg_features = TCG_EXT4_FEATURES,
409 .feat_names = kvm_feature_name,
410 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
411 .tcg_features = TCG_KVM_FEATURES,
414 .feat_names = svm_feature_name,
415 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
416 .tcg_features = TCG_SVM_FEATURES,
419 .feat_names = cpuid_7_0_ebx_feature_name,
421 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
423 .tcg_features = TCG_7_0_EBX_FEATURES,
426 .feat_names = cpuid_7_0_ecx_feature_name,
428 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
430 .tcg_features = TCG_7_0_ECX_FEATURES,
432 [FEAT_8000_0007_EDX] = {
433 .feat_names = cpuid_apm_edx_feature_name,
434 .cpuid_eax = 0x80000007,
436 .tcg_features = TCG_APM_FEATURES,
437 .unmigratable_flags = CPUID_APM_INVTSC,
440 .feat_names = cpuid_xsave_feature_name,
442 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
444 .tcg_features = TCG_XSAVE_FEATURES,
447 .feat_names = cpuid_6_feature_name,
448 .cpuid_eax = 6, .cpuid_reg = R_EAX,
449 .tcg_features = TCG_6_EAX_FEATURES,
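/*
 * Illustrative sketch (not in the original source): how the table above is
 * consumed.  A hypothetical helper that reads the corresponding host CPUID
 * leaf for a feature word; x86_cpu_get_supported_feature_word() does the
 * equivalent through kvm_arch_get_supported_cpuid() or tcg_features.
 */
#if 0
static uint32_t host_feature_word(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(wi->cpuid_eax, wi->cpuid_needs_ecx ? wi->cpuid_ecx : 0,
               &eax, &ebx, &ecx, &edx);
    switch (wi->cpuid_reg) {
    case R_EAX: return eax;
    case R_EBX: return ebx;
    case R_ECX: return ecx;
    default:    return edx;
    }
}
#endif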
453 typedef struct X86RegisterInfo32 {
454 /* Name of register */
456 /* QAPI enum value register */
457 X86CPURegister32 qapi_enum;
460 #define REGISTER(reg) \
461 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
462 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
474 const ExtSaveArea x86_ext_save_areas[] = {
476 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
477 .offset = offsetof(X86XSaveArea, avx_state),
478 .size = sizeof(XSaveAVX) },
479 [XSTATE_BNDREGS_BIT] =
480 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
481 .offset = offsetof(X86XSaveArea, bndreg_state),
482 .size = sizeof(XSaveBNDREG) },
483 [XSTATE_BNDCSR_BIT] =
484 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
485 .offset = offsetof(X86XSaveArea, bndcsr_state),
486 .size = sizeof(XSaveBNDCSR) },
487 [XSTATE_OPMASK_BIT] =
488 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
489 .offset = offsetof(X86XSaveArea, opmask_state),
490 .size = sizeof(XSaveOpmask) },
491 [XSTATE_ZMM_Hi256_BIT] =
492 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
493 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
494 .size = sizeof(XSaveZMM_Hi256) },
495 [XSTATE_Hi16_ZMM_BIT] =
496 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
497 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
498 .size = sizeof(XSaveHi16_ZMM) },
500 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
501 .offset = offsetof(X86XSaveArea, pkru_state),
502 .size = sizeof(XSavePKRU) },
505 const char *get_register_name_32(unsigned int reg)
507 if (reg >= CPU_NB_REGS32) {
510 return x86_reg_info_32[reg].name;
514 * Returns the set of feature flags that are supported and migratable by
515 * QEMU, for a given FeatureWord.
517 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
519 FeatureWordInfo *wi = &feature_word_info[w];
523 for (i = 0; i < 32; i++) {
524 uint32_t f = 1U << i;
525 /* If the feature name is unknown, it is not supported by QEMU yet */
526 if (!wi->feat_names[i]) {
529 /* Skip features known to QEMU, but explicitly marked as unmigratable */
530 if (wi->unmigratable_flags & f) {
538 void host_cpuid(uint32_t function, uint32_t count,
539 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
545 : "=a"(vec[0]), "=b"(vec[1]),
546 "=c"(vec[2]), "=d"(vec[3])
547 : "0"(function), "c"(count) : "cc");
548 #elif defined(__i386__)
549 asm volatile("pusha \n\t"
551 "mov %%eax, 0(%2) \n\t"
552 "mov %%ebx, 4(%2) \n\t"
553 "mov %%ecx, 8(%2) \n\t"
554 "mov %%edx, 12(%2) \n\t"
556 : : "a"(function), "c"(count), "S"(vec)
572 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
574 /* general substring compare of *[s1..e1) and *[s2..e2). sx is the start of
575 * a substring; ex, if non-NULL, points to the first char after the substring,
576 * otherwise the string is assumed to be sized by a terminating nul.
577 * Return lexical ordering of *s1:*s2.
579 static int sstrcmp(const char *s1, const char *e1,
580 const char *s2, const char *e2)
583 if (!*s1 || !*s2 || *s1 != *s2)
586 if (s1 == e1 && s2 == e2)
595 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
596 * '|' delimited (possibly empty) strings in which case search for a match
597 * within the alternatives proceeds left to right. Return 0 for success,
598 * non-zero otherwise.
600 static int altcmp(const char *s, const char *e, const char *altstr)
604 for (q = p = altstr; ; ) {
605 while (*p && *p != '|')
607 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
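/*
 * Illustrative sketch (not in the original source): altcmp() is what lets a
 * table entry such as "sse4.1|sse4_1" in ext_feature_name[] match either
 * spelling given on the command line.
 */
#if 0
static bool flag_matches_sse41(const char *flag)
{
    /* altcmp() returns 0 on a match against any '|'-separated alternative */
    return altcmp(flag, NULL, "sse4.1|sse4_1") == 0;
}
#endif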
616 /* search featureset for flag *[s..e), if found set corresponding bit in
617 * *pval and return true, otherwise return false
619 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
620 const char **featureset)
626 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
627 if (*ppc && !altcmp(s, e, *ppc)) {
635 static void add_flagname_to_bitmaps(const char *flagname,
636 FeatureWordArray words,
640 for (w = 0; w < FEATURE_WORDS; w++) {
641 FeatureWordInfo *wi = &feature_word_info[w];
642 if (wi->feat_names &&
643 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
647 if (w == FEATURE_WORDS) {
648 error_setg(errp, "CPU feature %s not found", flagname);
652 /* CPU class name definitions: */
654 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
655 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
657 /* Return type name for a given CPU model name
658 * Caller is responsible for freeing the returned string.
660 static char *x86_cpu_type_name(const char *model_name)
662 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
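/*
 * Illustrative sketch (not in the original source): with TYPE_X86_CPU
 * expanding to the target CPU type suffix (e.g. "x86_64-cpu" on a 64-bit
 * build), x86_cpu_type_name("Haswell") returns "Haswell-x86_64-cpu", which
 * the caller must g_free().
 */
#if 0
static ObjectClass *example_lookup_haswell(void)
{
    char *typename = x86_cpu_type_name("Haswell");
    ObjectClass *oc = object_class_by_name(typename);

    g_free(typename);
    return oc;
}
#endif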
665 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
670 if (cpu_model == NULL) {
674 typename = x86_cpu_type_name(cpu_model);
675 oc = object_class_by_name(typename);
680 struct X86CPUDefinition {
685 /* vendor is zero-terminated, 12 character ASCII string */
686 char vendor[CPUID_VENDOR_SZ + 1];
690 FeatureWordArray features;
694 static X86CPUDefinition builtin_x86_defs[] = {
698 .vendor = CPUID_VENDOR_AMD,
702 .features[FEAT_1_EDX] =
704 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
706 .features[FEAT_1_ECX] =
707 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
708 .features[FEAT_8000_0001_EDX] =
709 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
710 .features[FEAT_8000_0001_ECX] =
711 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
712 .xlevel = 0x8000000A,
713 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
718 .vendor = CPUID_VENDOR_AMD,
722 /* Missing: CPUID_HT */
723 .features[FEAT_1_EDX] =
725 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
726 CPUID_PSE36 | CPUID_VME,
727 .features[FEAT_1_ECX] =
728 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
730 .features[FEAT_8000_0001_EDX] =
731 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
732 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
733 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
734 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
736 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
737 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
738 .features[FEAT_8000_0001_ECX] =
739 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
740 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
741 /* Missing: CPUID_SVM_LBRV */
742 .features[FEAT_SVM] =
744 .xlevel = 0x8000001A,
745 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
750 .vendor = CPUID_VENDOR_INTEL,
754 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
755 .features[FEAT_1_EDX] =
757 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
758 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
759 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
760 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
761 .features[FEAT_1_ECX] =
762 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
764 .features[FEAT_8000_0001_EDX] =
765 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
766 .features[FEAT_8000_0001_ECX] =
768 .xlevel = 0x80000008,
769 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
774 .vendor = CPUID_VENDOR_INTEL,
778 /* Missing: CPUID_HT */
779 .features[FEAT_1_EDX] =
780 PPRO_FEATURES | CPUID_VME |
781 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
783 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
784 .features[FEAT_1_ECX] =
785 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
786 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
787 .features[FEAT_8000_0001_EDX] =
788 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
789 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
790 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
791 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
792 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
793 .features[FEAT_8000_0001_ECX] =
795 .xlevel = 0x80000008,
796 .model_id = "Common KVM processor"
801 .vendor = CPUID_VENDOR_INTEL,
805 .features[FEAT_1_EDX] =
807 .features[FEAT_1_ECX] =
809 .xlevel = 0x80000004,
810 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
815 .vendor = CPUID_VENDOR_INTEL,
819 .features[FEAT_1_EDX] =
820 PPRO_FEATURES | CPUID_VME |
821 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
822 .features[FEAT_1_ECX] =
824 .features[FEAT_8000_0001_ECX] =
826 .xlevel = 0x80000008,
827 .model_id = "Common 32-bit KVM processor"
832 .vendor = CPUID_VENDOR_INTEL,
836 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
837 .features[FEAT_1_EDX] =
838 PPRO_FEATURES | CPUID_VME |
839 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
841 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
842 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
843 .features[FEAT_1_ECX] =
844 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
845 .features[FEAT_8000_0001_EDX] =
847 .xlevel = 0x80000008,
848 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
853 .vendor = CPUID_VENDOR_INTEL,
857 .features[FEAT_1_EDX] =
864 .vendor = CPUID_VENDOR_INTEL,
868 .features[FEAT_1_EDX] =
875 .vendor = CPUID_VENDOR_INTEL,
879 .features[FEAT_1_EDX] =
886 .vendor = CPUID_VENDOR_INTEL,
890 .features[FEAT_1_EDX] =
897 .vendor = CPUID_VENDOR_AMD,
901 .features[FEAT_1_EDX] =
902 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
904 .features[FEAT_8000_0001_EDX] =
905 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
906 .xlevel = 0x80000008,
907 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
912 .vendor = CPUID_VENDOR_INTEL,
916 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
917 .features[FEAT_1_EDX] =
919 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
920 CPUID_ACPI | CPUID_SS,
921 /* Some CPUs lack CPUID_SEP */
922 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
924 .features[FEAT_1_ECX] =
925 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
927 .features[FEAT_8000_0001_EDX] =
929 .features[FEAT_8000_0001_ECX] =
931 .xlevel = 0x80000008,
932 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
937 .vendor = CPUID_VENDOR_INTEL,
941 .features[FEAT_1_EDX] =
942 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
943 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
944 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
945 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
946 CPUID_DE | CPUID_FP87,
947 .features[FEAT_1_ECX] =
948 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
949 .features[FEAT_8000_0001_EDX] =
950 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
951 .features[FEAT_8000_0001_ECX] =
953 .xlevel = 0x80000008,
954 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
959 .vendor = CPUID_VENDOR_INTEL,
963 .features[FEAT_1_EDX] =
964 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
965 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
966 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
967 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
968 CPUID_DE | CPUID_FP87,
969 .features[FEAT_1_ECX] =
970 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
972 .features[FEAT_8000_0001_EDX] =
973 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
974 .features[FEAT_8000_0001_ECX] =
976 .xlevel = 0x80000008,
977 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
982 .vendor = CPUID_VENDOR_INTEL,
986 .features[FEAT_1_EDX] =
987 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
988 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
989 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
990 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
991 CPUID_DE | CPUID_FP87,
992 .features[FEAT_1_ECX] =
993 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
994 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
995 .features[FEAT_8000_0001_EDX] =
996 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
997 .features[FEAT_8000_0001_ECX] =
999 .xlevel = 0x80000008,
1000 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1005 .vendor = CPUID_VENDOR_INTEL,
1009 .features[FEAT_1_EDX] =
1010 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1011 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1012 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1013 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1014 CPUID_DE | CPUID_FP87,
1015 .features[FEAT_1_ECX] =
1016 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1017 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1018 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1019 .features[FEAT_8000_0001_EDX] =
1020 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1021 .features[FEAT_8000_0001_ECX] =
1023 .features[FEAT_6_EAX] =
1025 .xlevel = 0x80000008,
1026 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1029 .name = "SandyBridge",
1031 .vendor = CPUID_VENDOR_INTEL,
1035 .features[FEAT_1_EDX] =
1036 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1037 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1038 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1039 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1040 CPUID_DE | CPUID_FP87,
1041 .features[FEAT_1_ECX] =
1042 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1043 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1044 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1045 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1047 .features[FEAT_8000_0001_EDX] =
1048 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1050 .features[FEAT_8000_0001_ECX] =
1052 .features[FEAT_XSAVE] =
1053 CPUID_XSAVE_XSAVEOPT,
1054 .features[FEAT_6_EAX] =
1056 .xlevel = 0x80000008,
1057 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1060 .name = "IvyBridge",
1062 .vendor = CPUID_VENDOR_INTEL,
1066 .features[FEAT_1_EDX] =
1067 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1068 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1069 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1070 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1071 CPUID_DE | CPUID_FP87,
1072 .features[FEAT_1_ECX] =
1073 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1074 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1075 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1076 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1077 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1078 .features[FEAT_7_0_EBX] =
1079 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1081 .features[FEAT_8000_0001_EDX] =
1082 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1084 .features[FEAT_8000_0001_ECX] =
1086 .features[FEAT_XSAVE] =
1087 CPUID_XSAVE_XSAVEOPT,
1088 .features[FEAT_6_EAX] =
1090 .xlevel = 0x80000008,
1091 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1094 .name = "Haswell-noTSX",
1096 .vendor = CPUID_VENDOR_INTEL,
1100 .features[FEAT_1_EDX] =
1101 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1102 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1103 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1104 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1105 CPUID_DE | CPUID_FP87,
1106 .features[FEAT_1_ECX] =
1107 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1108 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1109 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1110 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1111 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1112 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1113 .features[FEAT_8000_0001_EDX] =
1114 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1116 .features[FEAT_8000_0001_ECX] =
1117 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1118 .features[FEAT_7_0_EBX] =
1119 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1120 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1121 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1122 .features[FEAT_XSAVE] =
1123 CPUID_XSAVE_XSAVEOPT,
1124 .features[FEAT_6_EAX] =
1126 .xlevel = 0x80000008,
1127 .model_id = "Intel Core Processor (Haswell, no TSX)",
1131 .vendor = CPUID_VENDOR_INTEL,
1135 .features[FEAT_1_EDX] =
1136 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1137 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1138 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1139 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1140 CPUID_DE | CPUID_FP87,
1141 .features[FEAT_1_ECX] =
1142 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1143 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1144 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1145 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1146 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1147 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1148 .features[FEAT_8000_0001_EDX] =
1149 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1151 .features[FEAT_8000_0001_ECX] =
1152 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1153 .features[FEAT_7_0_EBX] =
1154 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1155 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1156 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1158 .features[FEAT_XSAVE] =
1159 CPUID_XSAVE_XSAVEOPT,
1160 .features[FEAT_6_EAX] =
1162 .xlevel = 0x80000008,
1163 .model_id = "Intel Core Processor (Haswell)",
1166 .name = "Broadwell-noTSX",
1168 .vendor = CPUID_VENDOR_INTEL,
1172 .features[FEAT_1_EDX] =
1173 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1174 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1175 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1176 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1177 CPUID_DE | CPUID_FP87,
1178 .features[FEAT_1_ECX] =
1179 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1180 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1181 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1182 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1183 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1184 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1185 .features[FEAT_8000_0001_EDX] =
1186 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1188 .features[FEAT_8000_0001_ECX] =
1189 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1190 .features[FEAT_7_0_EBX] =
1191 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1192 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1193 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1194 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1196 .features[FEAT_XSAVE] =
1197 CPUID_XSAVE_XSAVEOPT,
1198 .features[FEAT_6_EAX] =
1200 .xlevel = 0x80000008,
1201 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1204 .name = "Broadwell",
1206 .vendor = CPUID_VENDOR_INTEL,
1210 .features[FEAT_1_EDX] =
1211 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1212 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1213 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1214 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1215 CPUID_DE | CPUID_FP87,
1216 .features[FEAT_1_ECX] =
1217 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1218 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1219 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1220 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1221 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1222 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1223 .features[FEAT_8000_0001_EDX] =
1224 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1226 .features[FEAT_8000_0001_ECX] =
1227 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1228 .features[FEAT_7_0_EBX] =
1229 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1230 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1231 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1232 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1234 .features[FEAT_XSAVE] =
1235 CPUID_XSAVE_XSAVEOPT,
1236 .features[FEAT_6_EAX] =
1238 .xlevel = 0x80000008,
1239 .model_id = "Intel Core Processor (Broadwell)",
1242 .name = "Opteron_G1",
1244 .vendor = CPUID_VENDOR_AMD,
1248 .features[FEAT_1_EDX] =
1249 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1250 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1251 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1252 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1253 CPUID_DE | CPUID_FP87,
1254 .features[FEAT_1_ECX] =
1256 .features[FEAT_8000_0001_EDX] =
1257 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1258 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1259 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1260 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1261 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1262 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1263 .xlevel = 0x80000008,
1264 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1267 .name = "Opteron_G2",
1269 .vendor = CPUID_VENDOR_AMD,
1273 .features[FEAT_1_EDX] =
1274 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1275 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1276 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1277 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1278 CPUID_DE | CPUID_FP87,
1279 .features[FEAT_1_ECX] =
1280 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1281 /* Missing: CPUID_EXT2_RDTSCP */
1282 .features[FEAT_8000_0001_EDX] =
1283 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1284 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1285 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1286 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1287 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1288 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1289 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1290 .features[FEAT_8000_0001_ECX] =
1291 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1292 .xlevel = 0x80000008,
1293 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1296 .name = "Opteron_G3",
1298 .vendor = CPUID_VENDOR_AMD,
1302 .features[FEAT_1_EDX] =
1303 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1304 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1305 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1306 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1307 CPUID_DE | CPUID_FP87,
1308 .features[FEAT_1_ECX] =
1309 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1311 /* Missing: CPUID_EXT2_RDTSCP */
1312 .features[FEAT_8000_0001_EDX] =
1313 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1314 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1315 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1316 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1317 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1318 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1319 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1320 .features[FEAT_8000_0001_ECX] =
1321 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1322 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1323 .xlevel = 0x80000008,
1324 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1327 .name = "Opteron_G4",
1329 .vendor = CPUID_VENDOR_AMD,
1333 .features[FEAT_1_EDX] =
1334 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1335 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1336 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1337 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1338 CPUID_DE | CPUID_FP87,
1339 .features[FEAT_1_ECX] =
1340 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1341 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1342 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1344 /* Missing: CPUID_EXT2_RDTSCP */
1345 .features[FEAT_8000_0001_EDX] =
1347 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1348 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1349 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1350 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1351 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1352 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1353 .features[FEAT_8000_0001_ECX] =
1354 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1355 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1356 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1359 .xlevel = 0x8000001A,
1360 .model_id = "AMD Opteron 62xx class CPU",
1363 .name = "Opteron_G5",
1365 .vendor = CPUID_VENDOR_AMD,
1369 .features[FEAT_1_EDX] =
1370 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1371 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1372 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1373 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1374 CPUID_DE | CPUID_FP87,
1375 .features[FEAT_1_ECX] =
1376 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1377 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1378 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1379 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1380 /* Missing: CPUID_EXT2_RDTSCP */
1381 .features[FEAT_8000_0001_EDX] =
1383 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1384 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1385 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1386 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1387 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1388 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1389 .features[FEAT_8000_0001_ECX] =
1390 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1391 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1392 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1395 .xlevel = 0x8000001A,
1396 .model_id = "AMD Opteron 63xx class CPU",
1400 typedef struct PropValue {
1401 const char *prop, *value;
1404 /* KVM-specific features that are automatically added/removed
1405 * from all CPU models when KVM is enabled.
1407 static PropValue kvm_default_props[] = {
1408 { "kvmclock", "on" },
1409 { "kvm-nopiodelay", "on" },
1410 { "kvm-asyncpf", "on" },
1411 { "kvm-steal-time", "on" },
1412 { "kvm-pv-eoi", "on" },
1413 { "kvmclock-stable-bit", "on" },
1416 { "monitor", "off" },
1421 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1424 for (pv = kvm_default_props; pv->prop; pv++) {
1425 if (!strcmp(pv->prop, prop)) {
1431 /* It is valid to call this function only for properties that
1432 * are already present in the kvm_default_props table.
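/*
 * Illustrative call-site sketch (not in the original source): machine-type
 * compat code can flip one of the defaults above before CPUs are created,
 * in the same way x86_cpu_load_def() turns "x2apic" off when there is no
 * in-kernel irqchip.
 */
#if 0
x86_cpu_change_kvm_default("kvm-pv-eoi", "off");
#endif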
1437 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1438 bool migratable_only);
1442 static int cpu_x86_fill_model_id(char *str)
1444 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1447 for (i = 0; i < 3; i++) {
1448 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1449 memcpy(str + i * 16 + 0, &eax, 4);
1450 memcpy(str + i * 16 + 4, &ebx, 4);
1451 memcpy(str + i * 16 + 8, &ecx, 4);
1452 memcpy(str + i * 16 + 12, &edx, 4);
1457 static X86CPUDefinition host_cpudef;
1459 static Property host_x86_cpu_properties[] = {
1460 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1461 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1462 DEFINE_PROP_END_OF_LIST()
1465 /* class_init for the "host" CPU model
1467 * This function may be called before KVM is initialized.
1469 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1471 DeviceClass *dc = DEVICE_CLASS(oc);
1472 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1473 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1475 xcc->kvm_required = true;
1477 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1478 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1480 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1481 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1482 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1483 host_cpudef.stepping = eax & 0x0F;
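/*
 * Worked example (added for illustration): a Haswell host reports
 * CPUID.01H:EAX = 0x000306c3, which decodes as
 *   family   = ((0x306c3 >> 8) & 0xf) + ((0x306c3 >> 20) & 0xff) = 6,
 *   model    = ((0x306c3 >> 4) & 0xf) | ((0x306c3 & 0xf0000) >> 12) = 0x3c,
 *   stepping = 0x306c3 & 0xf = 3.
 */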
1485 cpu_x86_fill_model_id(host_cpudef.model_id);
1487 xcc->cpu_def = &host_cpudef;
1489 /* level, xlevel, xlevel2, and the feature words are initialized on
1490 * instance_init, because they require KVM to be initialized.
1493 dc->props = host_x86_cpu_properties;
1494 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1495 dc->cannot_destroy_with_object_finalize_yet = true;
1498 static void host_x86_cpu_initfn(Object *obj)
1500 X86CPU *cpu = X86_CPU(obj);
1501 CPUX86State *env = &cpu->env;
1502 KVMState *s = kvm_state;
1504 assert(kvm_enabled());
1506 /* We can't fill the features array here because we don't know yet if
1507 * "migratable" is true or false.
1509 cpu->host_features = true;
1511 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1512 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1513 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1515 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1518 static const TypeInfo host_x86_cpu_type_info = {
1519 .name = X86_CPU_TYPE_NAME("host"),
1520 .parent = TYPE_X86_CPU,
1521 .instance_init = host_x86_cpu_initfn,
1522 .class_init = host_x86_cpu_class_init,
1527 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1529 FeatureWordInfo *f = &feature_word_info[w];
1532 for (i = 0; i < 32; ++i) {
1533 if ((1UL << i) & mask) {
1534 const char *reg = get_register_name_32(f->cpuid_reg);
1536 fprintf(stderr, "warning: %s doesn't support requested feature: "
1537 "CPUID.%02XH:%s%s%s [bit %d]\n",
1538 kvm_enabled() ? "host" : "TCG",
1540 f->feat_names[i] ? "." : "",
1541 f->feat_names[i] ? f->feat_names[i] : "", i);
1546 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1547 const char *name, void *opaque,
1550 X86CPU *cpu = X86_CPU(obj);
1551 CPUX86State *env = &cpu->env;
1554 value = (env->cpuid_version >> 8) & 0xf;
1556 value += (env->cpuid_version >> 20) & 0xff;
1558 visit_type_int(v, name, &value, errp);
1561 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1562 const char *name, void *opaque,
1565 X86CPU *cpu = X86_CPU(obj);
1566 CPUX86State *env = &cpu->env;
1567 const int64_t min = 0;
1568 const int64_t max = 0xff + 0xf;
1569 Error *local_err = NULL;
1572 visit_type_int(v, name, &value, &local_err);
1574 error_propagate(errp, local_err);
1577 if (value < min || value > max) {
1578 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1579 name ? name : "null", value, min, max);
1583 env->cpuid_version &= ~0xff00f00;
1585 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1587 env->cpuid_version |= value << 8;
1591 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1592 const char *name, void *opaque,
1595 X86CPU *cpu = X86_CPU(obj);
1596 CPUX86State *env = &cpu->env;
1599 value = (env->cpuid_version >> 4) & 0xf;
1600 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1601 visit_type_int(v, name, &value, errp);
1604 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1605 const char *name, void *opaque,
1608 X86CPU *cpu = X86_CPU(obj);
1609 CPUX86State *env = &cpu->env;
1610 const int64_t min = 0;
1611 const int64_t max = 0xff;
1612 Error *local_err = NULL;
1615 visit_type_int(v, name, &value, &local_err);
1617 error_propagate(errp, local_err);
1620 if (value < min || value > max) {
1621 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1622 name ? name : "null", value, min, max);
1626 env->cpuid_version &= ~0xf00f0;
1627 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1630 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1631 const char *name, void *opaque,
1634 X86CPU *cpu = X86_CPU(obj);
1635 CPUX86State *env = &cpu->env;
1638 value = env->cpuid_version & 0xf;
1639 visit_type_int(v, name, &value, errp);
1642 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1643 const char *name, void *opaque,
1646 X86CPU *cpu = X86_CPU(obj);
1647 CPUX86State *env = &cpu->env;
1648 const int64_t min = 0;
1649 const int64_t max = 0xf;
1650 Error *local_err = NULL;
1653 visit_type_int(v, name, &value, &local_err);
1655 error_propagate(errp, local_err);
1658 if (value < min || value > max) {
1659 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1660 name ? name : "null", value, min, max);
1664 env->cpuid_version &= ~0xf;
1665 env->cpuid_version |= value & 0xf;
1668 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1670 X86CPU *cpu = X86_CPU(obj);
1671 CPUX86State *env = &cpu->env;
1674 value = g_malloc(CPUID_VENDOR_SZ + 1);
1675 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1676 env->cpuid_vendor3);
1680 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1683 X86CPU *cpu = X86_CPU(obj);
1684 CPUX86State *env = &cpu->env;
1687 if (strlen(value) != CPUID_VENDOR_SZ) {
1688 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1692 env->cpuid_vendor1 = 0;
1693 env->cpuid_vendor2 = 0;
1694 env->cpuid_vendor3 = 0;
1695 for (i = 0; i < 4; i++) {
1696 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1697 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1698 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
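/*
 * Worked example (added for illustration): setting vendor to "AuthenticAMD"
 * packs the bytes little-endian per register, giving cpuid_vendor1 =
 * 0x68747541 ("Auth"), cpuid_vendor2 = 0x69746e65 ("enti") and
 * cpuid_vendor3 = 0x444d4163 ("cAMD") -- the same layout CPUID.0 reports
 * in EBX, EDX and ECX.
 */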
1702 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1704 X86CPU *cpu = X86_CPU(obj);
1705 CPUX86State *env = &cpu->env;
1709 value = g_malloc(48 + 1);
1710 for (i = 0; i < 48; i++) {
1711 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1717 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1720 X86CPU *cpu = X86_CPU(obj);
1721 CPUX86State *env = &cpu->env;
1724 if (model_id == NULL) {
1727 len = strlen(model_id);
1728 memset(env->cpuid_model, 0, 48);
1729 for (i = 0; i < 48; i++) {
1733 c = (uint8_t)model_id[i];
1735 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1739 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1740 void *opaque, Error **errp)
1742 X86CPU *cpu = X86_CPU(obj);
1745 value = cpu->env.tsc_khz * 1000;
1746 visit_type_int(v, name, &value, errp);
1749 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1750 void *opaque, Error **errp)
1752 X86CPU *cpu = X86_CPU(obj);
1753 const int64_t min = 0;
1754 const int64_t max = INT64_MAX;
1755 Error *local_err = NULL;
1758 visit_type_int(v, name, &value, &local_err);
1760 error_propagate(errp, local_err);
1763 if (value < min || value > max) {
1764 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1765 name ? name : "null", value, min, max);
1769 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1772 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1773 void *opaque, Error **errp)
1775 X86CPU *cpu = X86_CPU(obj);
1776 int64_t value = cpu->apic_id;
1778 visit_type_int(v, name, &value, errp);
1781 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1782 void *opaque, Error **errp)
1784 X86CPU *cpu = X86_CPU(obj);
1785 DeviceState *dev = DEVICE(obj);
1786 const int64_t min = 0;
1787 const int64_t max = UINT32_MAX;
1788 Error *error = NULL;
1791 if (dev->realized) {
1792 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1793 "it was realized", name, object_get_typename(obj));
1797 visit_type_int(v, name, &value, &error);
1799 error_propagate(errp, error);
1802 if (value < min || value > max) {
1803 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1804 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1805 object_get_typename(obj), name, value, min, max);
1809 if ((value != cpu->apic_id) && cpu_exists(value)) {
1810 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1813 cpu->apic_id = value;
1816 /* Generic getter for "feature-words" and "filtered-features" properties */
1817 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1818 const char *name, void *opaque,
1821 uint32_t *array = (uint32_t *)opaque;
1824 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1825 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1826 X86CPUFeatureWordInfoList *list = NULL;
1828 for (w = 0; w < FEATURE_WORDS; w++) {
1829 FeatureWordInfo *wi = &feature_word_info[w];
1830 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1831 qwi->cpuid_input_eax = wi->cpuid_eax;
1832 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1833 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1834 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1835 qwi->features = array[w];
1837 /* List will be in reverse order, but order shouldn't matter */
1838 list_entries[w].next = list;
1839 list_entries[w].value = &word_infos[w];
1840 list = &list_entries[w];
1843 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
1844 error_propagate(errp, err);
1847 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1848 void *opaque, Error **errp)
1850 X86CPU *cpu = X86_CPU(obj);
1851 int64_t value = cpu->hyperv_spinlock_attempts;
1853 visit_type_int(v, name, &value, errp);
1856 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1857 void *opaque, Error **errp)
1859 const int64_t min = 0xFFF;
1860 const int64_t max = UINT_MAX;
1861 X86CPU *cpu = X86_CPU(obj);
1865 visit_type_int(v, name, &value, &err);
1867 error_propagate(errp, err);
1871 if (value < min || value > max) {
1872 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1873 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1874 object_get_typename(obj), name ? name : "null",
1878 cpu->hyperv_spinlock_attempts = value;
1881 static PropertyInfo qdev_prop_spinlocks = {
1883 .get = x86_get_hv_spinlocks,
1884 .set = x86_set_hv_spinlocks,
1887 /* Convert all '_' in a feature string option name to '-', to make the feature
1888 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
1890 static inline void feat2prop(char *s)
1892 while ((s = strchr(s, '_'))) {
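/* Example (added for illustration): feat2prop() turns a legacy flag name such
 * as "kvm_nopiodelay" into the QOM property name "kvm-nopiodelay" before it
 * is handed to object_property_parse(). */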
1897 /* Parse "+feature,-feature,feature=foo" CPU feature string
1899 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1902 X86CPU *cpu = X86_CPU(cs);
1903 char *featurestr; /* Single 'key=value' string being parsed */
1905 /* Features to be added */
1906 FeatureWordArray plus_features = { 0 };
1907 /* Features to be removed */
1908 FeatureWordArray minus_features = { 0 };
1910 CPUX86State *env = &cpu->env;
1911 Error *local_err = NULL;
1913 featurestr = features ? strtok(features, ",") : NULL;
1915 while (featurestr) {
1917 if (featurestr[0] == '+') {
1918 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1919 } else if (featurestr[0] == '-') {
1920 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1921 } else if ((val = strchr(featurestr, '='))) {
1923 feat2prop(featurestr);
1924 if (!strcmp(featurestr, "xlevel")) {
1928 numvalue = strtoul(val, &err, 0);
1929 if (!*val || *err) {
1930 error_setg(errp, "bad numerical value %s", val);
1933 if (numvalue < 0x80000000) {
1934 error_report("xlevel value shall always be >= 0x80000000"
1935 ", fixup will be removed in future versions");
1936 numvalue += 0x80000000;
1938 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1939 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1940 } else if (!strcmp(featurestr, "tsc-freq")) {
1945 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1946 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1947 if (tsc_freq < 0 || *err) {
1948 error_setg(errp, "bad numerical value %s", val);
1951 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1952 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1954 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1956 const int min = 0xFFF;
1958 numvalue = strtoul(val, &err, 0);
1959 if (!*val || *err) {
1960 error_setg(errp, "bad numerical value %s", val);
1963 if (numvalue < min) {
1964 error_report("hv-spinlocks value shall always be >= 0x%x"
1965 ", fixup will be removed in future versions",
1969 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1970 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1972 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1975 feat2prop(featurestr);
1976 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1979 error_propagate(errp, local_err);
1982 featurestr = strtok(NULL, ",");
1985 if (cpu->host_features) {
1986 for (w = 0; w < FEATURE_WORDS; w++) {
1988 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1992 for (w = 0; w < FEATURE_WORDS; w++) {
1993 env->features[w] |= plus_features[w];
1994 env->features[w] &= ~minus_features[w];
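/*
 * Illustrative sketch (not in the original source): the feature part of a
 * -cpu option such as
 *
 *   -cpu Haswell,+avx2,-hle,xlevel=0x8000000A,hv-spinlocks=0x1fff
 *
 * is handled here: "+avx2"/"-hle" land in plus_features/minus_features via
 * add_flagname_to_bitmaps(), while "xlevel" and "hv-spinlocks" go through
 * the numeric special cases before object_property_parse().
 */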
1998 /* Print all cpuid feature names in featureset
2000 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2005 for (bit = 0; bit < 32; bit++) {
2006 if (featureset[bit]) {
2007 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2013 /* generate CPU information. */
2014 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2016 X86CPUDefinition *def;
2020 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2021 def = &builtin_x86_defs[i];
2022 snprintf(buf, sizeof(buf), "%s", def->name);
2023 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2026 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2027 "KVM processor with all supported host features "
2028 "(only available in KVM mode)");
2031 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2032 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2033 FeatureWordInfo *fw = &feature_word_info[i];
2035 (*cpu_fprintf)(f, " ");
2036 listflags(f, cpu_fprintf, fw->feat_names);
2037 (*cpu_fprintf)(f, "\n");
2041 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2043 CpuDefinitionInfoList *cpu_list = NULL;
2044 X86CPUDefinition *def;
2047 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2048 CpuDefinitionInfoList *entry;
2049 CpuDefinitionInfo *info;
2051 def = &builtin_x86_defs[i];
2052 info = g_malloc0(sizeof(*info));
2053 info->name = g_strdup(def->name);
2055 entry = g_malloc0(sizeof(*entry));
2056 entry->value = info;
2057 entry->next = cpu_list;
2064 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2065 bool migratable_only)
2067 FeatureWordInfo *wi = &feature_word_info[w];
2070 if (kvm_enabled()) {
2071 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2074 } else if (tcg_enabled()) {
2075 r = wi->tcg_features;
2079 if (migratable_only) {
2080 r &= x86_cpu_get_migratable_flags(w);
/*
* Filters CPU feature words based on host availability of each feature.
*
* Returns: 0 if all flags are supported by the host, non-zero otherwise.
*/
static int x86_cpu_filter_features(X86CPU *cpu)
2092 CPUX86State *env = &cpu->env;
2096 for (w = 0; w < FEATURE_WORDS; w++) {
2097 uint32_t host_feat =
2098 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2099 uint32_t requested_features = env->features[w];
2100 env->features[w] &= host_feat;
2101 cpu->filtered_features[w] = requested_features & ~env->features[w];
2102 if (cpu->filtered_features[w]) {
2103 if (cpu->check_cpuid || cpu->enforce_cpuid) {
report_unavailable_features(w, cpu->filtered_features[w]);
}
rv = 1;
}
}

return rv;
}
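/* Apply a list of PropValue name/value pairs (terminated by a NULL prop)
* as QOM properties on the CPU object.
*/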
2113 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2116 for (pv = props; pv->prop; pv++) {
2120 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
/* Load data from X86CPUDefinition
*/
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2129 CPUX86State *env = &cpu->env;
2131 char host_vendor[CPUID_VENDOR_SZ + 1];
2134 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2135 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2136 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2137 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2138 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2139 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2140 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2141 for (w = 0; w < FEATURE_WORDS; w++) {
2142 env->features[w] = def->features[w];
2145 /* Special cases not set in the X86CPUDefinition structs: */
2146 if (kvm_enabled()) {
2147 if (!kvm_irqchip_in_kernel()) {
2148 x86_cpu_change_kvm_default("x2apic", "off");
2151 x86_cpu_apply_props(cpu, kvm_default_props);
2154 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2156 /* sysenter isn't supported in compatibility mode on AMD,
2157 * syscall isn't supported in compatibility mode on Intel.
2158 * Normally we advertise the actual CPU vendor, but you can
2159 * override this using the 'vendor' property if you want to use
2160 * KVM's sysenter/syscall emulation in compatibility mode and
* when doing cross vendor migration
*/
vendor = def->vendor;
2164 if (kvm_enabled()) {
2165 uint32_t ebx = 0, ecx = 0, edx = 0;
2166 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2167 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2168 vendor = host_vendor;
object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
}
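/* Create an X86CPU from a "model[,feature,...]" string.  The returned CPU is
* not realized; the caller realizes it (or unrefs it on error).
*/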
2175 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2180 gchar **model_pieces;
2181 char *name, *features;
2182 Error *error = NULL;
2184 model_pieces = g_strsplit(cpu_model, ",", 2);
2185 if (!model_pieces[0]) {
2186 error_setg(&error, "Invalid/empty CPU model name");
2189 name = model_pieces[0];
2190 features = model_pieces[1];
2192 oc = x86_cpu_class_by_name(name);
2194 error_setg(&error, "Unable to find CPU definition: %s", name);
2197 xcc = X86_CPU_CLASS(oc);
2199 if (xcc->kvm_required && !kvm_enabled()) {
2200 error_setg(&error, "CPU model '%s' requires KVM", name);
2204 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2206 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2212 if (error != NULL) {
2213 error_propagate(errp, error);
2215 object_unref(OBJECT(cpu));
g_strfreev(model_pieces);
return cpu;
}
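/* Convenience wrapper: create a CPU from 'cpu_model' and realize it,
* reporting any error and returning NULL on failure.
*/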
2223 X86CPU *cpu_x86_init(const char *cpu_model)
2225 Error *error = NULL;
2228 cpu = cpu_x86_create(cpu_model, &error);
2233 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2237 error_report_err(error);
object_unref(OBJECT(cpu));
cpu = NULL;
}
}
return cpu;
}
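/* class_init for the per-model CPU subclasses: store the X86CPUDefinition
* that x86_cpu_load_def() applies at instance_init time.
*/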
2246 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2248 X86CPUDefinition *cpudef = data;
2249 X86CPUClass *xcc = X86_CPU_CLASS(oc);
xcc->cpu_def = cpudef;
}
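/* Register one QOM type per built-in X86CPUDefinition. */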
2254 static void x86_register_cpudef_type(X86CPUDefinition *def)
2256 char *typename = x86_cpu_type_name(def->name);
2259 .parent = TYPE_X86_CPU,
2260 .class_init = x86_cpu_cpudef_class_init,
2268 #if !defined(CONFIG_USER_ONLY)
2270 void cpu_clear_apic_feature(CPUX86State *env)
2272 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2275 #endif /* !CONFIG_USER_ONLY */
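/* CPUID instruction emulation: fill eax/ebx/ecx/edx for leaf 'index' and
* subleaf 'count', clamping out-of-range leaves to cpuid_level/cpuid_xlevel
* the way real CPUs do.
*/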
2277 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2278 uint32_t *eax, uint32_t *ebx,
2279 uint32_t *ecx, uint32_t *edx)
2281 X86CPU *cpu = x86_env_get_cpu(env);
2282 CPUState *cs = CPU(cpu);
2284 /* test if maximum index reached */
2285 if (index & 0x80000000) {
2286 if (index > env->cpuid_xlevel) {
2287 if (env->cpuid_xlevel2 > 0) {
2288 /* Handle the Centaur's CPUID instruction. */
2289 if (index > env->cpuid_xlevel2) {
2290 index = env->cpuid_xlevel2;
2291 } else if (index < 0xC0000000) {
2292 index = env->cpuid_xlevel;
2295 /* Intel documentation states that invalid EAX input will
2296 * return the same information as EAX=cpuid_level
* (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
*/
index = env->cpuid_level;
2303 if (index > env->cpuid_level)
2304 index = env->cpuid_level;
2309 *eax = env->cpuid_level;
2310 *ebx = env->cpuid_vendor1;
2311 *edx = env->cpuid_vendor2;
2312 *ecx = env->cpuid_vendor3;
2315 *eax = env->cpuid_version;
2316 *ebx = (cpu->apic_id << 24) |
2317 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2318 *ecx = env->features[FEAT_1_ECX];
2319 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2320 *ecx |= CPUID_EXT_OSXSAVE;
2322 *edx = env->features[FEAT_1_EDX];
2323 if (cs->nr_cores * cs->nr_threads > 1) {
2324 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2329 /* cache info: needed for Pentium Pro compatibility */
2330 if (cpu->cache_info_passthrough) {
2331 host_cpuid(index, 0, eax, ebx, ecx, edx);
2334 *eax = 1; /* Number of CPUID[EAX=2] calls required */
*edx = (L1D_DESCRIPTOR << 16) | \
(L1I_DESCRIPTOR << 8) | \
(L2_DESCRIPTOR);
break;
case 4:
/* cache info: needed for Core compatibility */
2343 if (cpu->cache_info_passthrough) {
host_cpuid(index, count, eax, ebx, ecx, edx);
*eax &= ~0xFC000000;
} else {
*eax = 0;
switch (count) {
case 0: /* L1 dcache info */
2350 *eax |= CPUID_4_TYPE_DCACHE | \
2351 CPUID_4_LEVEL(1) | \
2352 CPUID_4_SELF_INIT_LEVEL;
2353 *ebx = (L1D_LINE_SIZE - 1) | \
2354 ((L1D_PARTITIONS - 1) << 12) | \
2355 ((L1D_ASSOCIATIVITY - 1) << 22);
2356 *ecx = L1D_SETS - 1;
*edx = CPUID_4_NO_INVD_SHARING;
break;
case 1: /* L1 icache info */
2360 *eax |= CPUID_4_TYPE_ICACHE | \
2361 CPUID_4_LEVEL(1) | \
2362 CPUID_4_SELF_INIT_LEVEL;
2363 *ebx = (L1I_LINE_SIZE - 1) | \
2364 ((L1I_PARTITIONS - 1) << 12) | \
2365 ((L1I_ASSOCIATIVITY - 1) << 22);
2366 *ecx = L1I_SETS - 1;
*edx = CPUID_4_NO_INVD_SHARING;
break;
case 2: /* L2 cache info */
2370 *eax |= CPUID_4_TYPE_UNIFIED | \
2371 CPUID_4_LEVEL(2) | \
2372 CPUID_4_SELF_INIT_LEVEL;
2373 if (cs->nr_threads > 1) {
2374 *eax |= (cs->nr_threads - 1) << 14;
2376 *ebx = (L2_LINE_SIZE - 1) | \
2377 ((L2_PARTITIONS - 1) << 12) | \
((L2_ASSOCIATIVITY - 1) << 22);
*ecx = L2_SETS - 1;
*edx = CPUID_4_NO_INVD_SHARING;
break;
default: /* end of info */
2391 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2392 if ((*eax & 31) && cs->nr_cores > 1) {
2393 *eax |= (cs->nr_cores - 1) << 26;
2397 /* mwait info: needed for Core compatibility */
2398 *eax = 0; /* Smallest monitor-line size in bytes */
2399 *ebx = 0; /* Largest monitor-line size in bytes */
2400 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2404 /* Thermal and Power Leaf */
2405 *eax = env->features[FEAT_6_EAX];
2411 /* Structured Extended Feature Flags Enumeration Leaf */
2413 *eax = 0; /* Maximum ECX value for sub-leaves */
2414 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2415 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2416 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2417 *ecx |= CPUID_7_0_ECX_OSPKE;
2419 *edx = 0; /* Reserved */
2428 /* Direct Cache Access Information Leaf */
2429 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2435 /* Architectural Performance Monitoring Leaf */
2436 if (kvm_enabled() && cpu->enable_pmu) {
2437 KVMState *s = cs->kvm_state;
2439 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2440 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2441 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2442 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2451 KVMState *s = cs->kvm_state;
2455 /* Processor Extended State */
2460 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2463 if (kvm_enabled()) {
2464 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2466 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2473 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2474 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2475 if ((env->features[esa->feature] & esa->bits) == esa->bits
2476 && ((ena_mask >> i) & 1) != 0) {
2480 *edx |= 1u << (i - 32);
2482 *ecx = MAX(*ecx, esa->offset + esa->size);
2485 *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2487 } else if (count == 1) {
2488 *eax = env->features[FEAT_XSAVE];
2489 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2490 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2491 if ((env->features[esa->feature] & esa->bits) == esa->bits
2492 && ((ena_mask >> count) & 1) != 0) {
2500 *eax = env->cpuid_xlevel;
2501 *ebx = env->cpuid_vendor1;
2502 *edx = env->cpuid_vendor2;
2503 *ecx = env->cpuid_vendor3;
2506 *eax = env->cpuid_version;
2508 *ecx = env->features[FEAT_8000_0001_ECX];
2509 *edx = env->features[FEAT_8000_0001_EDX];
2511 /* The Linux kernel checks for the CMPLegacy bit and
2512 * discards multiple thread information if it is set.
* So don't set it here for Intel to make Linux guests happy.
*/
if (cs->nr_cores * cs->nr_threads > 1) {
2516 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2517 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2518 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2519 *ecx |= 1 << 1; /* CmpLegacy bit */
2526 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2527 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2528 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2529 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2532 /* cache info (L1 cache) */
2533 if (cpu->cache_info_passthrough) {
2534 host_cpuid(index, 0, eax, ebx, ecx, edx);
2537 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2538 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2539 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2540 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2541 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2542 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2543 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2544 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2547 /* cache info (L2 cache) */
2548 if (cpu->cache_info_passthrough) {
2549 host_cpuid(index, 0, eax, ebx, ecx, edx);
2552 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2553 (L2_DTLB_2M_ENTRIES << 16) | \
2554 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2555 (L2_ITLB_2M_ENTRIES);
2556 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2557 (L2_DTLB_4K_ENTRIES << 16) | \
2558 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2559 (L2_ITLB_4K_ENTRIES);
2560 *ecx = (L2_SIZE_KB_AMD << 16) | \
2561 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2562 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2563 *edx = ((L3_SIZE_KB/512) << 18) | \
2564 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2565 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2571 *edx = env->features[FEAT_8000_0007_EDX];
2574 /* virtual & phys address size in low 2 bytes. */
2575 /* XXX: This value must match the one used in the MMU code. */
2576 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2577 /* 64 bit processor */
2578 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2579 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2581 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2582 *eax = 0x00000024; /* 36 bits physical */
2584 *eax = 0x00000020; /* 32 bits physical */
2590 if (cs->nr_cores * cs->nr_threads > 1) {
2591 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2595 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2596 *eax = 0x00000001; /* SVM Revision */
2597 *ebx = 0x00000010; /* nr of ASIDs */
2599 *edx = env->features[FEAT_SVM]; /* optional features */
2608 *eax = env->cpuid_xlevel2;
2614 /* Support for VIA CPU's CPUID instruction */
2615 *eax = env->cpuid_version;
2618 *edx = env->features[FEAT_C000_0001_EDX];
2623 /* Reserved for the future, and now filled with zero */
2630 /* reserved values: zero */
2639 /* CPUClass::reset() */
2640 static void x86_cpu_reset(CPUState *s)
2642 X86CPU *cpu = X86_CPU(s);
2643 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2644 CPUX86State *env = &cpu->env;
2649 xcc->parent_reset(s);
2651 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2655 env->old_exception = -1;
2657 /* init to reset state */
2659 #ifdef CONFIG_SOFTMMU
2660 env->hflags |= HF_SOFTMMU_MASK;
2662 env->hflags2 |= HF2_GIF_MASK;
2664 cpu_x86_update_cr0(env, 0x60000010);
2665 env->a20_mask = ~0x0;
2666 env->smbase = 0x30000;
2668 env->idt.limit = 0xffff;
2669 env->gdt.limit = 0xffff;
2670 env->ldt.limit = 0xffff;
2671 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2672 env->tr.limit = 0xffff;
2673 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2675 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2676 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2677 DESC_R_MASK | DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
DESC_A_MASK);

env->eip = 0xfff0;
2695 env->regs[R_EDX] = env->cpuid_version;
for (i = 0; i < 8; i++) {
env->fptags[i] = 1;
}
cpu_set_fpuc(env, 0x37f);
2705 env->mxcsr = 0x1f80;
2706 /* All units are in INIT state. */
2709 env->pat = 0x0007040600070406ULL;
2710 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2712 memset(env->dr, 0, sizeof(env->dr));
2713 env->dr[6] = DR6_FIXED_1;
2714 env->dr[7] = DR7_FIXED_1;
2715 cpu_breakpoint_remove_all(s, BP_CPU);
2716 cpu_watchpoint_remove_all(s, BP_CPU);
2719 xcr0 = XSTATE_FP_MASK;
2721 #ifdef CONFIG_USER_ONLY
2722 /* Enable all the features for user-mode. */
2723 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
xcr0 |= XSTATE_SSE_MASK;
}
for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
const ExtSaveArea *esa = &x86_ext_save_areas[i];
if ((env->features[esa->feature] & esa->bits) == esa->bits) {
xcr0 |= 1ull << i;
}
}

if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
}
if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
cr4 |= CR4_FSGSBASE_MASK;
}
#endif

env->xcr0 = xcr0;
cpu_x86_update_cr4(env, cr4);
/*
* SDM 11.11.5 requires:
* - IA32_MTRR_DEF_TYPE MSR.E = 0
* - IA32_MTRR_PHYSMASKn.V = 0
* All other bits are undefined. For simplification, zero it all.
*/
env->mtrr_deftype = 0;
2751 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2752 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2754 #if !defined(CONFIG_USER_ONLY)
2755 /* We hard-wire the BSP to the first CPU. */
2756 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2758 s->halted = !cpu_is_bsp(cpu);
2760 if (kvm_enabled()) {
kvm_arch_reset_vcpu(cpu);
}
#endif
}

#ifndef CONFIG_USER_ONLY
2767 bool cpu_is_bsp(X86CPU *cpu)
2769 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2772 /* TODO: remove me, when reset over QOM tree is implemented */
2773 static void x86_cpu_machine_reset_cb(void *opaque)
2775 X86CPU *cpu = opaque;
cpu_reset(CPU(cpu));
}
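/* Enable the default MCE banks when the CPU is at least family 6 and
* advertises both the MCE and MCA CPUID bits.
*/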
2780 static void mce_init(X86CPU *cpu)
2782 CPUX86State *cenv = &cpu->env;
2785 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2786 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2787 (CPUID_MCE | CPUID_MCA)) {
2788 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2789 cenv->mcg_ctl = ~(uint64_t)0;
2790 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
cenv->mce_banks[bank * 4] = ~(uint64_t)0;
}
}
}
2796 #ifndef CONFIG_USER_ONLY
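/* Create the local APIC device for this CPU, picking the implementation
* that matches the accelerator: kvm-apic, xen-apic or the emulated apic.
*/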
2797 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2799 APICCommonState *apic;
2800 const char *apic_type = "apic";
2802 if (kvm_apic_in_kernel()) {
2803 apic_type = "kvm-apic";
2804 } else if (xen_enabled()) {
2805 apic_type = "xen-apic";
2808 cpu->apic_state = DEVICE(object_new(apic_type));
2810 object_property_add_child(OBJECT(cpu), "apic",
2811 OBJECT(cpu->apic_state), NULL);
2812 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2813 /* TODO: convert to link<> */
2814 apic = APIC_COMMON(cpu->apic_state);
apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
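/* Realize the APIC and, for the first CPU only, map the APIC MMIO area. */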
2819 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2821 APICCommonState *apic;
2822 static bool apic_mmio_map_once;
if (cpu->apic_state == NULL) {
return;
}

object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
errp);
2830 /* Map APIC MMIO area */
2831 apic = APIC_COMMON(cpu->apic_state);
2832 if (!apic_mmio_map_once) {
2833 memory_region_add_subregion_overlap(get_system_memory(),
2835 MSR_IA32_APICBASE_BASE,
apic_mmio_map_once = true;
}
}
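/* machine-init-done notifier: once /machine/smram exists, alias it into
* this CPU's address space with higher priority than normal memory.
*/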
2842 static void x86_cpu_machine_done(Notifier *n, void *unused)
2844 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2845 MemoryRegion *smram =
(MemoryRegion *) object_resolve_path("/machine/smram", NULL);

if (smram) {
cpu->smram = g_new(MemoryRegion, 1);
2850 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2851 smram, 0, 1ull << 32);
2852 memory_region_set_enabled(cpu->smram, false);
memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
}
}
#else
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif
2863 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2864 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2865 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2866 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2867 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2868 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
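/* DeviceClass::realize: filter the requested features against what the
* host (or TCG) supports, create the APIC, set up the per-CPU address
* space and initialize the vCPU.
*/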
2869 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2871 CPUState *cs = CPU(dev);
2872 X86CPU *cpu = X86_CPU(dev);
2873 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2874 CPUX86State *env = &cpu->env;
2875 Error *local_err = NULL;
2876 static bool ht_warned;
if (cpu->apic_id < 0) {
error_setg(errp, "apic-id property was not initialized properly");
return;
}
2883 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2884 env->cpuid_level = 7;
2887 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
error_setg(&local_err,
kvm_enabled() ?
"Host doesn't support requested features" :
"TCG doesn't support requested features");
goto out;
}

/* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
* CPUID[1].EDX.
*/
if (IS_AMD_CPU(env)) {
2899 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2900 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2901 & CPUID_EXT2_AMD_ALIASES);
2905 cpu_exec_init(cs, &error_abort);
if (tcg_enabled()) {
tcg_x86_init();
}
2911 #ifndef CONFIG_USER_ONLY
2912 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2914 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2915 x86_cpu_apic_create(cpu, &local_err);
if (local_err != NULL) {
goto out;
}
}
#endif

mce_init(cpu);

#ifndef CONFIG_USER_ONLY
2925 if (tcg_enabled()) {
2926 AddressSpace *newas = g_new(AddressSpace, 1);
2928 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2929 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2931 /* Outer container... */
2932 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2933 memory_region_set_enabled(cpu->cpu_as_root, true);
2935 /* ... with two regions inside: normal system memory with low
2938 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2939 get_system_memory(), 0, ~0ull);
2940 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2941 memory_region_set_enabled(cpu->cpu_as_mem, true);
2942 address_space_init(newas, cpu->cpu_as_root, "CPU");
2944 cpu_address_space_init(cs, newas, 0);
2946 /* ... SMRAM with higher priority, linked from /machine/smram. */
2947 cpu->machine_done.notify = x86_cpu_machine_done;
qemu_add_machine_init_done_notifier(&cpu->machine_done);
}
#endif

qemu_init_vcpu(cs);

/* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2955 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
* based on inputs (sockets,cores,threads), it is still better to give
* users a warning.
*
* NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
* cs->nr_threads hasn't been populated yet and the checking is incorrect.
*/
if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2963 error_report("AMD CPU doesn't support hyperthreading. Please configure"
" -smp options properly.");
ht_warned = true;
}

x86_cpu_apic_realize(cpu, &local_err);
if (local_err != NULL) {
goto out;
}
cpu_reset(cs);

xcc->parent_realize(dev, &local_err);

out:
if (local_err != NULL) {
error_propagate(errp, local_err);
return;
}
}

typedef struct BitProperty {
uint32_t *ptr;
uint32_t mask;
} BitProperty;
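/* Getter/setter backing the per-feature-bit QOM properties registered by
* x86_cpu_register_bit_prop() below.
*/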
2988 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
2989 void *opaque, Error **errp)
2991 BitProperty *fp = opaque;
2992 bool value = (*fp->ptr & fp->mask) == fp->mask;
2993 visit_type_bool(v, name, &value, errp);
2996 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
2997 void *opaque, Error **errp)
2999 DeviceState *dev = DEVICE(obj);
3000 BitProperty *fp = opaque;
3001 Error *local_err = NULL;
3004 if (dev->realized) {
qdev_prop_set_after_realize(dev, name, errp);
return;
}

visit_type_bool(v, name, &value, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}

if (value) {
*fp->ptr |= fp->mask;
} else {
*fp->ptr &= ~fp->mask;
}
}
3022 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
BitProperty *prop = opaque;
g_free(prop);
}
/* Register a boolean property to get/set a single bit in a uint32_t field.
*
* The same property name can be registered multiple times to make it affect
* multiple bits in the same FeatureWord. In that case, the getter will return
* true only if all bits are set.
*/
static void x86_cpu_register_bit_prop(X86CPU *cpu,
3036 const char *prop_name,
3042 uint32_t mask = (1UL << bitnr);
op = object_property_find(OBJECT(cpu), prop_name, NULL);
if (op) {
fp = op->opaque;
assert(fp->ptr == field);
fp->mask |= mask;
} else {
fp = g_new0(BitProperty, 1);
fp->ptr = field;
fp->mask = mask;
object_property_add(OBJECT(cpu), prop_name, "bool",
x86_cpu_get_bit_prop,
x86_cpu_set_bit_prop,
x86_cpu_release_bit_prop, fp, &error_abort);
}
}
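/* Register QOM properties for every named bit in feature word 'w'.  Names in
* feat_names are "|"-separated: the first becomes the property, the rest
* become aliases (e.g. an entry like "fxsr_opt|ffxsr" would yield one
* property plus one alias, assuming feat2prop maps '_' to '-').
*/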
3060 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3064 Object *obj = OBJECT(cpu);
3067 FeatureWordInfo *fi = &feature_word_info[w];
if (!fi->feat_names) {
return;
}
if (!fi->feat_names[bitnr]) {
return;
}

names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3078 feat2prop(names[0]);
3079 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3081 for (i = 1; names[i]; i++) {
3082 feat2prop(names[i]);
object_property_add_alias(obj, names[i], obj, names[0],
&error_abort);
}

g_strfreev(names);
}
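/* QOM instance_init: register the version/vendor/feature properties and
* load the model definition attached to this CPU class.
*/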
3090 static void x86_cpu_initfn(Object *obj)
3092 CPUState *cs = CPU(obj);
3093 X86CPU *cpu = X86_CPU(obj);
3094 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3095 CPUX86State *env = &cpu->env;
3100 object_property_add(obj, "family", "int",
3101 x86_cpuid_version_get_family,
3102 x86_cpuid_version_set_family, NULL, NULL, NULL);
3103 object_property_add(obj, "model", "int",
3104 x86_cpuid_version_get_model,
3105 x86_cpuid_version_set_model, NULL, NULL, NULL);
3106 object_property_add(obj, "stepping", "int",
3107 x86_cpuid_version_get_stepping,
3108 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3109 object_property_add_str(obj, "vendor",
3110 x86_cpuid_get_vendor,
3111 x86_cpuid_set_vendor, NULL);
3112 object_property_add_str(obj, "model-id",
3113 x86_cpuid_get_model_id,
3114 x86_cpuid_set_model_id, NULL);
3115 object_property_add(obj, "tsc-frequency", "int",
3116 x86_cpuid_get_tsc_freq,
3117 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3118 object_property_add(obj, "apic-id", "int",
3119 x86_cpuid_get_apic_id,
3120 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3121 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3122 x86_cpu_get_feature_words,
3123 NULL, NULL, (void *)env->features, NULL);
3124 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3125 x86_cpu_get_feature_words,
3126 NULL, NULL, (void *)cpu->filtered_features, NULL);
3128 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3130 #ifndef CONFIG_USER_ONLY
/* Any code creating new X86CPU objects has to set apic-id explicitly */
cpu->apic_id = -1;
#endif

for (w = 0; w < FEATURE_WORDS; w++) {
3138 for (bitnr = 0; bitnr < 32; bitnr++) {
x86_cpu_register_feature_bit_props(cpu, w, bitnr);
}
}

x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
3146 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3148 X86CPU *cpu = X86_CPU(cs);
3150 return cpu->apic_id;
3153 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3155 X86CPU *cpu = X86_CPU(cs);
3157 return cpu->env.cr[0] & CR0_PG_MASK;
3160 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3162 X86CPU *cpu = X86_CPU(cs);
cpu->env.eip = value;
}
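/* A TranslationBlock's pc is cs_base + eip, so subtract cs_base to recover
* eip when resynchronizing CPU state from a TB.
*/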
3167 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3169 X86CPU *cpu = X86_CPU(cs);
3171 cpu->env.eip = tb->pc - tb->cs_base;
3174 static bool x86_cpu_has_work(CPUState *cs)
3176 X86CPU *cpu = X86_CPU(cs);
3177 CPUX86State *env = &cpu->env;
3179 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3180 CPU_INTERRUPT_POLL)) &&
3181 (env->eflags & IF_MASK)) ||
3182 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3183 CPU_INTERRUPT_INIT |
3184 CPU_INTERRUPT_SIPI |
3185 CPU_INTERRUPT_MCE)) ||
3186 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3187 !(env->hflags & HF_SMM_MASK));
3190 static Property x86_cpu_properties[] = {
3191 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3192 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3193 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3194 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3195 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3196 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3197 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3198 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3199 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3200 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3201 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3202 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3203 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3204 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3205 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3206 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3207 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3208 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
DEFINE_PROP_END_OF_LIST(),
};
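/* Class init shared by every X86CPU subclass: hook up the realize/reset
* methods and the generic CPUClass callbacks.
*/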
3212 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3214 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3215 CPUClass *cc = CPU_CLASS(oc);
3216 DeviceClass *dc = DEVICE_CLASS(oc);
3218 xcc->parent_realize = dc->realize;
3219 dc->realize = x86_cpu_realizefn;
3220 dc->props = x86_cpu_properties;
3222 xcc->parent_reset = cc->reset;
3223 cc->reset = x86_cpu_reset;
3224 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3226 cc->class_by_name = x86_cpu_class_by_name;
3227 cc->parse_features = x86_cpu_parse_featurestr;
3228 cc->has_work = x86_cpu_has_work;
3229 cc->do_interrupt = x86_cpu_do_interrupt;
3230 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3231 cc->dump_state = x86_cpu_dump_state;
3232 cc->set_pc = x86_cpu_set_pc;
3233 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3234 cc->gdb_read_register = x86_cpu_gdb_read_register;
3235 cc->gdb_write_register = x86_cpu_gdb_write_register;
3236 cc->get_arch_id = x86_cpu_get_arch_id;
3237 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3238 #ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3242 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3243 cc->write_elf64_note = x86_cpu_write_elf64_note;
3244 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3245 cc->write_elf32_note = x86_cpu_write_elf32_note;
3246 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
cc->vmsd = &vmstate_x86_cpu;
#endif
cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3250 #ifndef CONFIG_USER_ONLY
cc->debug_excp_handler = breakpoint_handler;
#endif
cc->cpu_exec_enter = x86_cpu_exec_enter;
3254 cc->cpu_exec_exit = x86_cpu_exec_exit;
/*
* Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
* object in cpus -> dangling pointer after final object_unref().
*/
dc->cannot_destroy_with_object_finalize_yet = true;
}
3263 static const TypeInfo x86_cpu_type_info = {
.name = TYPE_X86_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(X86CPU),
.instance_init = x86_cpu_initfn,
.abstract = true,
.class_size = sizeof(X86CPUClass),
.class_init = x86_cpu_common_class_init,
};
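/* Register the abstract base type, one subtype per built-in model and,
* for KVM builds, the "host" CPU type.
*/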
3273 static void x86_cpu_register_types(void)
3277 type_register_static(&x86_cpu_type_info);
3278 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
x86_register_cpudef_type(&builtin_x86_defs[i]);
}
#ifdef CONFIG_KVM
type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)