1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
22 #include "sysemu/kvm.h"
23 #include "sysemu/cpus.h"
26 #include "qemu/error-report.h"
27 #include "qemu/option.h"
28 #include "qemu/config-file.h"
29 #include "qapi/qmp/qerror.h"
31 #include "qapi-types.h"
32 #include "qapi-visit.h"
33 #include "qapi/visitor.h"
34 #include "sysemu/arch_init.h"
37 #if defined(CONFIG_KVM)
38 #include <linux/kvm_para.h>
41 #include "sysemu/sysemu.h"
42 #include "hw/qdev-properties.h"
43 #ifndef CONFIG_USER_ONLY
44 #include "exec/address-spaces.h"
45 #include "hw/xen/xen.h"
46 #include "hw/i386/apic_internal.h"
50 /* Cache topology CPUID constants: */
52 /* CPUID Leaf 2 Descriptors */
54 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
55 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
56 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
59 /* CPUID Leaf 4 constants: */
62 #define CPUID_4_TYPE_DCACHE 1
63 #define CPUID_4_TYPE_ICACHE 2
64 #define CPUID_4_TYPE_UNIFIED 3
66 #define CPUID_4_LEVEL(l) ((l) << 5)
68 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
69 #define CPUID_4_FULLY_ASSOC (1 << 9)
72 #define CPUID_4_NO_INVD_SHARING (1 << 0)
73 #define CPUID_4_INCLUSIVE (1 << 1)
74 #define CPUID_4_COMPLEX_IDX (1 << 2)
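/* Illustrative use of the leaf-4 EAX fields above: a self-initializing L1 data
 * cache would be described as
 * CPUID_4_TYPE_DCACHE | CPUID_4_LEVEL(1) | CPUID_4_SELF_INIT_LEVEL. */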
76 #define ASSOC_FULL 0xFF
78 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
79 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
89 a == ASSOC_FULL ? 0xF : \
90 0 /* invalid value */)
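/* For example, under AMD's documented encoding a 16-way cache is reported as
 * 0x8 and a fully associative one (ASSOC_FULL) as 0xF; sizes with no defined
 * encoding fall through to 0. */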
93 /* Definitions of the hardcoded cache entries we expose: */
96 #define L1D_LINE_SIZE 64
97 #define L1D_ASSOCIATIVITY 8
99 #define L1D_PARTITIONS 1
100 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
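/* Worked example: 64 bytes/line * 8 ways * 64 sets * 1 partition = 32 KiB,
 * which is what the leaf-2 descriptor below advertises. */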
101 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
102 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
103 #define L1D_LINES_PER_TAG 1
104 #define L1D_SIZE_KB_AMD 64
105 #define L1D_ASSOCIATIVITY_AMD 2
107 /* L1 instruction cache: */
108 #define L1I_LINE_SIZE 64
109 #define L1I_ASSOCIATIVITY 8
111 #define L1I_PARTITIONS 1
112 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
113 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
114 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
115 #define L1I_LINES_PER_TAG 1
116 #define L1I_SIZE_KB_AMD 64
117 #define L1I_ASSOCIATIVITY_AMD 2
119 /* Level 2 unified cache: */
120 #define L2_LINE_SIZE 64
121 #define L2_ASSOCIATIVITY 16
123 #define L2_PARTITIONS 1
124 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
125 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
126 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
127 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
128 #define L2_LINES_PER_TAG 1
129 #define L2_SIZE_KB_AMD 512
132 #define L3_SIZE_KB 0 /* disabled */
133 #define L3_ASSOCIATIVITY 0 /* disabled */
134 #define L3_LINES_PER_TAG 0 /* disabled */
135 #define L3_LINE_SIZE 0 /* disabled */
137 /* TLB definitions: */
139 #define L1_DTLB_2M_ASSOC 1
140 #define L1_DTLB_2M_ENTRIES 255
141 #define L1_DTLB_4K_ASSOC 1
142 #define L1_DTLB_4K_ENTRIES 255
144 #define L1_ITLB_2M_ASSOC 1
145 #define L1_ITLB_2M_ENTRIES 255
146 #define L1_ITLB_4K_ASSOC 1
147 #define L1_ITLB_4K_ENTRIES 255
149 #define L2_DTLB_2M_ASSOC 0 /* disabled */
150 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
151 #define L2_DTLB_4K_ASSOC 4
152 #define L2_DTLB_4K_ENTRIES 512
154 #define L2_ITLB_2M_ASSOC 0 /* disabled */
155 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
156 #define L2_ITLB_4K_ASSOC 4
157 #define L2_ITLB_4K_ENTRIES 512
161 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
162 uint32_t vendor2, uint32_t vendor3)
165 for (i = 0; i < 4; i++) {
166 dst[i] = vendor1 >> (8 * i);
167 dst[i + 4] = vendor2 >> (8 * i);
168 dst[i + 8] = vendor3 >> (8 * i);
170 dst[CPUID_VENDOR_SZ] = '\0';
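/* Example: a GenuineIntel part returns EBX=0x756e6547 ("Genu"),
 * EDX=0x49656e69 ("ineI") and ECX=0x6c65746e ("ntel") in leaf 0, so
 * x86_cpu_vendor_words2str(dst, ebx, edx, ecx) produces "GenuineIntel". */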
173 /* feature flags taken from "Intel Processor Identification and the CPUID
174 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
175 * between feature naming conventions, aliases may be added.
177 static const char *feature_name[] = {
178 "fpu", "vme", "de", "pse",
179 "tsc", "msr", "pae", "mce",
180 "cx8", "apic", NULL, "sep",
181 "mtrr", "pge", "mca", "cmov",
182 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
183 NULL, "ds" /* Intel dts */, "acpi", "mmx",
184 "fxsr", "sse", "sse2", "ss",
185 "ht" /* Intel htt */, "tm", "ia64", "pbe",
187 static const char *ext_feature_name[] = {
188 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
189 "ds_cpl", "vmx", "smx", "est",
190 "tm2", "ssse3", "cid", NULL,
191 "fma", "cx16", "xtpr", "pdcm",
192 NULL, "pcid", "dca", "sse4.1|sse4_1",
193 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
194 "tsc-deadline", "aes", "xsave", "osxsave",
195 "avx", "f16c", "rdrand", "hypervisor",
197 /* Feature names that are already defined in feature_name[] but are set in
198 * CPUID[8000_0001].EDX on AMD CPUs don't have their names repeated in
199 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
200 * if and only if the CPU vendor is AMD.
202 static const char *ext2_feature_name[] = {
203 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
204 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
205 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
206 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
207 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
208 "nx|xd", NULL, "mmxext", NULL /* mmx */,
209 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
210 NULL, "lm|i64", "3dnowext", "3dnow",
212 static const char *ext3_feature_name[] = {
213 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
214 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
215 "3dnowprefetch", "osvw", "ibs", "xop",
216 "skinit", "wdt", NULL, "lwp",
217 "fma4", "tce", NULL, "nodeid_msr",
218 NULL, "tbm", "topoext", "perfctr_core",
219 "perfctr_nb", NULL, NULL, NULL,
220 NULL, NULL, NULL, NULL,
223 static const char *ext4_feature_name[] = {
224 NULL, NULL, "xstore", "xstore-en",
225 NULL, NULL, "xcrypt", "xcrypt-en",
226 "ace2", "ace2-en", "phe", "phe-en",
227 "pmm", "pmm-en", NULL, NULL,
228 NULL, NULL, NULL, NULL,
229 NULL, NULL, NULL, NULL,
230 NULL, NULL, NULL, NULL,
231 NULL, NULL, NULL, NULL,
234 static const char *kvm_feature_name[] = {
235 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
236 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
237 NULL, NULL, NULL, NULL,
238 NULL, NULL, NULL, NULL,
239 NULL, NULL, NULL, NULL,
240 NULL, NULL, NULL, NULL,
241 "kvmclock-stable-bit", NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
245 static const char *svm_feature_name[] = {
246 "npt", "lbrv", "svm_lock", "nrip_save",
247 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
248 NULL, NULL, "pause_filter", NULL,
249 "pfthreshold", NULL, NULL, NULL,
250 NULL, NULL, NULL, NULL,
251 NULL, NULL, NULL, NULL,
252 NULL, NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
256 static const char *cpuid_7_0_ebx_feature_name[] = {
257 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
258 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
259 "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
260 "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
263 static const char *cpuid_7_0_ecx_feature_name[] = {
264 NULL, NULL, NULL, "pku",
265 "ospke", NULL, NULL, NULL,
266 NULL, NULL, NULL, NULL,
267 NULL, NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
274 static const char *cpuid_apm_edx_feature_name[] = {
275 NULL, NULL, NULL, NULL,
276 NULL, NULL, NULL, NULL,
277 "invtsc", NULL, NULL, NULL,
278 NULL, NULL, NULL, NULL,
279 NULL, NULL, NULL, NULL,
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
285 static const char *cpuid_xsave_feature_name[] = {
286 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
287 NULL, NULL, NULL, NULL,
288 NULL, NULL, NULL, NULL,
289 NULL, NULL, NULL, NULL,
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
296 static const char *cpuid_6_feature_name[] = {
297 NULL, NULL, "arat", NULL,
298 NULL, NULL, NULL, NULL,
299 NULL, NULL, NULL, NULL,
300 NULL, NULL, NULL, NULL,
301 NULL, NULL, NULL, NULL,
302 NULL, NULL, NULL, NULL,
303 NULL, NULL, NULL, NULL,
304 NULL, NULL, NULL, NULL,
307 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
308 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
309 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
310 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
311 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
312 CPUID_PSE36 | CPUID_FXSR)
313 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
314 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
315 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
316 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
317 CPUID_PAE | CPUID_SEP | CPUID_APIC)
319 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
320 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
321 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
322 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
323 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
324 /* partly implemented:
325 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
326 /* missing:
327 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
328 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
329 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
330 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
331 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
332 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
333 /* missing:
334 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
335 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
336 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
337 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
338 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
341 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
343 #define TCG_EXT2_X86_64_FEATURES 0
346 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
347 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
348 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
349 TCG_EXT2_X86_64_FEATURES)
350 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
351 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
352 #define TCG_EXT4_FEATURES 0
353 #define TCG_SVM_FEATURES 0
354 #define TCG_KVM_FEATURES 0
355 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
356 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
357 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
358 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
359 /* missing:
360 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
361 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
362 CPUID_7_0_EBX_RDSEED */
363 #define TCG_7_0_ECX_FEATURES 0
364 #define TCG_APM_FEATURES 0
365 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
366 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
367 /* missing:
368 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
370 typedef struct FeatureWordInfo {
371 const char **feat_names;
372 uint32_t cpuid_eax; /* Input EAX for CPUID */
373 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
374 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
375 int cpuid_reg; /* output register (R_* constant) */
376 uint32_t tcg_features; /* Feature flags supported by TCG */
377 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
380 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
382 .feat_names = feature_name,
383 .cpuid_eax = 1, .cpuid_reg = R_EDX,
384 .tcg_features = TCG_FEATURES,
387 .feat_names = ext_feature_name,
388 .cpuid_eax = 1, .cpuid_reg = R_ECX,
389 .tcg_features = TCG_EXT_FEATURES,
391 [FEAT_8000_0001_EDX] = {
392 .feat_names = ext2_feature_name,
393 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
394 .tcg_features = TCG_EXT2_FEATURES,
396 [FEAT_8000_0001_ECX] = {
397 .feat_names = ext3_feature_name,
398 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
399 .tcg_features = TCG_EXT3_FEATURES,
401 [FEAT_C000_0001_EDX] = {
402 .feat_names = ext4_feature_name,
403 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
404 .tcg_features = TCG_EXT4_FEATURES,
407 .feat_names = kvm_feature_name,
408 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
409 .tcg_features = TCG_KVM_FEATURES,
412 .feat_names = svm_feature_name,
413 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
414 .tcg_features = TCG_SVM_FEATURES,
417 .feat_names = cpuid_7_0_ebx_feature_name,
419 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
421 .tcg_features = TCG_7_0_EBX_FEATURES,
424 .feat_names = cpuid_7_0_ecx_feature_name,
426 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
428 .tcg_features = TCG_7_0_ECX_FEATURES,
430 [FEAT_8000_0007_EDX] = {
431 .feat_names = cpuid_apm_edx_feature_name,
432 .cpuid_eax = 0x80000007,
434 .tcg_features = TCG_APM_FEATURES,
435 .unmigratable_flags = CPUID_APM_INVTSC,
438 .feat_names = cpuid_xsave_feature_name,
440 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
442 .tcg_features = TCG_XSAVE_FEATURES,
445 .feat_names = cpuid_6_feature_name,
446 .cpuid_eax = 6, .cpuid_reg = R_EAX,
447 .tcg_features = TCG_6_EAX_FEATURES,
451 typedef struct X86RegisterInfo32 {
452 /* Name of register */
454 /* QAPI enum value register */
455 X86CPURegister32 qapi_enum;
458 #define REGISTER(reg) \
459 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
460 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
472 const ExtSaveArea x86_ext_save_areas[] = {
473 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
474 .offset = 0x240, .size = 0x100 },
475 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
476 .offset = 0x3c0, .size = 0x40 },
477 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
478 .offset = 0x400, .size = 0x40 },
479 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
480 .offset = 0x440, .size = 0x40 },
481 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
482 .offset = 0x480, .size = 0x200 },
483 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
484 .offset = 0x680, .size = 0x400 },
485 [9] = { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
486 .offset = 0xA80, .size = 0x8 },
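/* These offsets assume the standard (non-compacted) XSAVE layout: the legacy
 * FXSAVE region occupies bytes 0x000-0x1ff and the XSAVE header 0x200-0x23f,
 * so the first extended component (AVX, index 2) starts at offset 0x240. */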
489 const char *get_register_name_32(unsigned int reg)
491 if (reg >= CPU_NB_REGS32) {
494 return x86_reg_info_32[reg].name;
498 * Returns the set of feature flags that are supported and migratable by
499 * QEMU, for a given FeatureWord.
501 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
503 FeatureWordInfo *wi = &feature_word_info[w];
507 for (i = 0; i < 32; i++) {
508 uint32_t f = 1U << i;
509 /* If the feature name is unknown, it is not supported by QEMU yet */
510 if (!wi->feat_names[i]) {
513 /* Skip features known to QEMU, but explicitly marked as unmigratable */
514 if (wi->unmigratable_flags & f) {
522 void host_cpuid(uint32_t function, uint32_t count,
523 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
529 : "=a"(vec[0]), "=b"(vec[1]),
530 "=c"(vec[2]), "=d"(vec[3])
531 : "0"(function), "c"(count) : "cc");
532 #elif defined(__i386__)
533 asm volatile("pusha \n\t"
535 "mov %%eax, 0(%2) \n\t"
536 "mov %%ebx, 4(%2) \n\t"
537 "mov %%ecx, 8(%2) \n\t"
538 "mov %%edx, 12(%2) \n\t"
540 : : "a"(function), "c"(count), "S"(vec)
556 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
558 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
559 * a substring. ex if !NULL points to the first char after a substring,
560 * otherwise the string is assumed to be sized by a terminating nul.
561 * Return lexical ordering of *s1:*s2.
563 static int sstrcmp(const char *s1, const char *e1,
564 const char *s2, const char *e2)
567 if (!*s1 || !*s2 || *s1 != *s2)
570 if (s1 == e1 && s2 == e2)
579 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
580 * '|' delimited (possibly empty) strings, in which case the search for a match
581 * within the alternatives proceeds left to right. Return 0 for success,
582 * non-zero otherwise.
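* For example, altcmp(s, e, "sse4.1|sse4_1") returns 0 if *[s..e) matches
* either alias spelling used in the feature name tables above.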
584 static int altcmp(const char *s, const char *e, const char *altstr)
588 for (q = p = altstr; ; ) {
589 while (*p && *p != '|')
591 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
600 /* search featureset for flag *[s..e), if found set corresponding bit in
601 * *pval and return true, otherwise return false
603 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
604 const char **featureset)
610 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
611 if (*ppc && !altcmp(s, e, *ppc)) {
619 static void add_flagname_to_bitmaps(const char *flagname,
620 FeatureWordArray words,
624 for (w = 0; w < FEATURE_WORDS; w++) {
625 FeatureWordInfo *wi = &feature_word_info[w];
626 if (wi->feat_names &&
627 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
631 if (w == FEATURE_WORDS) {
632 error_setg(errp, "CPU feature %s not found", flagname);
636 /* CPU class name definitions: */
638 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
639 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
641 /* Return type name for a given CPU model name
642 * Caller is responsible for freeing the returned string.
644 static char *x86_cpu_type_name(const char *model_name)
646 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
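/* For example, x86_cpu_type_name("qemu64") returns "qemu64-" TYPE_X86_CPU,
 * i.e. "qemu64-x86_64-cpu" when building the x86_64 target. */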
649 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
654 if (cpu_model == NULL) {
658 typename = x86_cpu_type_name(cpu_model);
659 oc = object_class_by_name(typename);
664 struct X86CPUDefinition {
669 /* vendor is a zero-terminated, 12-character ASCII string */
670 char vendor[CPUID_VENDOR_SZ + 1];
674 FeatureWordArray features;
678 static X86CPUDefinition builtin_x86_defs[] = {
682 .vendor = CPUID_VENDOR_AMD,
686 .features[FEAT_1_EDX] =
688 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
690 .features[FEAT_1_ECX] =
691 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
692 .features[FEAT_8000_0001_EDX] =
693 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
694 .features[FEAT_8000_0001_ECX] =
695 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
696 .xlevel = 0x8000000A,
701 .vendor = CPUID_VENDOR_AMD,
705 /* Missing: CPUID_HT */
706 .features[FEAT_1_EDX] =
708 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
709 CPUID_PSE36 | CPUID_VME,
710 .features[FEAT_1_ECX] =
711 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
713 .features[FEAT_8000_0001_EDX] =
714 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
715 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
716 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
717 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
719 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
720 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
721 .features[FEAT_8000_0001_ECX] =
722 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
723 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
724 /* Missing: CPUID_SVM_LBRV */
725 .features[FEAT_SVM] =
727 .xlevel = 0x8000001A,
728 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
733 .vendor = CPUID_VENDOR_INTEL,
737 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
738 .features[FEAT_1_EDX] =
740 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
741 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
742 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
743 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
744 .features[FEAT_1_ECX] =
745 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
747 .features[FEAT_8000_0001_EDX] =
748 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
749 .features[FEAT_8000_0001_ECX] =
751 .xlevel = 0x80000008,
752 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
757 .vendor = CPUID_VENDOR_INTEL,
761 /* Missing: CPUID_HT */
762 .features[FEAT_1_EDX] =
763 PPRO_FEATURES | CPUID_VME |
764 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
766 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
767 .features[FEAT_1_ECX] =
768 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
769 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
770 .features[FEAT_8000_0001_EDX] =
771 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
772 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
773 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
774 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
775 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
776 .features[FEAT_8000_0001_ECX] =
778 .xlevel = 0x80000008,
779 .model_id = "Common KVM processor"
784 .vendor = CPUID_VENDOR_INTEL,
788 .features[FEAT_1_EDX] =
790 .features[FEAT_1_ECX] =
792 .xlevel = 0x80000004,
797 .vendor = CPUID_VENDOR_INTEL,
801 .features[FEAT_1_EDX] =
802 PPRO_FEATURES | CPUID_VME |
803 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
804 .features[FEAT_1_ECX] =
806 .features[FEAT_8000_0001_ECX] =
808 .xlevel = 0x80000008,
809 .model_id = "Common 32-bit KVM processor"
814 .vendor = CPUID_VENDOR_INTEL,
818 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
819 .features[FEAT_1_EDX] =
820 PPRO_FEATURES | CPUID_VME |
821 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
823 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
824 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
825 .features[FEAT_1_ECX] =
826 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
827 .features[FEAT_8000_0001_EDX] =
829 .xlevel = 0x80000008,
830 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
835 .vendor = CPUID_VENDOR_INTEL,
839 .features[FEAT_1_EDX] =
846 .vendor = CPUID_VENDOR_INTEL,
850 .features[FEAT_1_EDX] =
857 .vendor = CPUID_VENDOR_INTEL,
861 .features[FEAT_1_EDX] =
868 .vendor = CPUID_VENDOR_INTEL,
872 .features[FEAT_1_EDX] =
879 .vendor = CPUID_VENDOR_AMD,
883 .features[FEAT_1_EDX] =
884 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
886 .features[FEAT_8000_0001_EDX] =
887 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
888 .xlevel = 0x80000008,
893 .vendor = CPUID_VENDOR_INTEL,
897 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
898 .features[FEAT_1_EDX] =
900 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
901 CPUID_ACPI | CPUID_SS,
902 /* Some CPUs lack CPUID_SEP */
903 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
905 .features[FEAT_1_ECX] =
906 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
908 .features[FEAT_8000_0001_EDX] =
910 .features[FEAT_8000_0001_ECX] =
912 .xlevel = 0x80000008,
913 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
918 .vendor = CPUID_VENDOR_INTEL,
922 .features[FEAT_1_EDX] =
923 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
924 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
925 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
926 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
927 CPUID_DE | CPUID_FP87,
928 .features[FEAT_1_ECX] =
929 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
930 .features[FEAT_8000_0001_EDX] =
931 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
932 .features[FEAT_8000_0001_ECX] =
934 .xlevel = 0x80000008,
935 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
940 .vendor = CPUID_VENDOR_INTEL,
944 .features[FEAT_1_EDX] =
945 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
946 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
947 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
948 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
949 CPUID_DE | CPUID_FP87,
950 .features[FEAT_1_ECX] =
951 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
953 .features[FEAT_8000_0001_EDX] =
954 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
955 .features[FEAT_8000_0001_ECX] =
957 .xlevel = 0x80000008,
958 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
963 .vendor = CPUID_VENDOR_INTEL,
967 .features[FEAT_1_EDX] =
968 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
969 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
970 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
971 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
972 CPUID_DE | CPUID_FP87,
973 .features[FEAT_1_ECX] =
974 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
975 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
976 .features[FEAT_8000_0001_EDX] =
977 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
978 .features[FEAT_8000_0001_ECX] =
980 .xlevel = 0x80000008,
981 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
986 .vendor = CPUID_VENDOR_INTEL,
990 .features[FEAT_1_EDX] =
991 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
992 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
993 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
994 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
995 CPUID_DE | CPUID_FP87,
996 .features[FEAT_1_ECX] =
997 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
998 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
999 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1000 .features[FEAT_8000_0001_EDX] =
1001 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1002 .features[FEAT_8000_0001_ECX] =
1004 .features[FEAT_6_EAX] =
1006 .xlevel = 0x80000008,
1007 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1010 .name = "SandyBridge",
1012 .vendor = CPUID_VENDOR_INTEL,
1016 .features[FEAT_1_EDX] =
1017 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1018 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1019 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1020 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1021 CPUID_DE | CPUID_FP87,
1022 .features[FEAT_1_ECX] =
1023 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1024 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1025 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1026 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1028 .features[FEAT_8000_0001_EDX] =
1029 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1031 .features[FEAT_8000_0001_ECX] =
1033 .features[FEAT_XSAVE] =
1034 CPUID_XSAVE_XSAVEOPT,
1035 .features[FEAT_6_EAX] =
1037 .xlevel = 0x80000008,
1038 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1041 .name = "IvyBridge",
1043 .vendor = CPUID_VENDOR_INTEL,
1047 .features[FEAT_1_EDX] =
1048 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1049 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1050 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1051 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1052 CPUID_DE | CPUID_FP87,
1053 .features[FEAT_1_ECX] =
1054 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1055 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1056 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1057 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1058 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1059 .features[FEAT_7_0_EBX] =
1060 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1062 .features[FEAT_8000_0001_EDX] =
1063 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1065 .features[FEAT_8000_0001_ECX] =
1067 .features[FEAT_XSAVE] =
1068 CPUID_XSAVE_XSAVEOPT,
1069 .features[FEAT_6_EAX] =
1071 .xlevel = 0x80000008,
1072 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1075 .name = "Haswell-noTSX",
1077 .vendor = CPUID_VENDOR_INTEL,
1081 .features[FEAT_1_EDX] =
1082 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1083 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1084 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1085 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1086 CPUID_DE | CPUID_FP87,
1087 .features[FEAT_1_ECX] =
1088 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1089 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1090 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1091 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1092 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1093 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1094 .features[FEAT_8000_0001_EDX] =
1095 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1097 .features[FEAT_8000_0001_ECX] =
1098 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1099 .features[FEAT_7_0_EBX] =
1100 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1101 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1102 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1103 .features[FEAT_XSAVE] =
1104 CPUID_XSAVE_XSAVEOPT,
1105 .features[FEAT_6_EAX] =
1107 .xlevel = 0x80000008,
1108 .model_id = "Intel Core Processor (Haswell, no TSX)",
1112 .vendor = CPUID_VENDOR_INTEL,
1116 .features[FEAT_1_EDX] =
1117 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1118 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1119 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1120 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1121 CPUID_DE | CPUID_FP87,
1122 .features[FEAT_1_ECX] =
1123 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1124 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1125 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1126 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1127 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1128 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1129 .features[FEAT_8000_0001_EDX] =
1130 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1132 .features[FEAT_8000_0001_ECX] =
1133 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1134 .features[FEAT_7_0_EBX] =
1135 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1136 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1137 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1139 .features[FEAT_XSAVE] =
1140 CPUID_XSAVE_XSAVEOPT,
1141 .features[FEAT_6_EAX] =
1143 .xlevel = 0x80000008,
1144 .model_id = "Intel Core Processor (Haswell)",
1147 .name = "Broadwell-noTSX",
1149 .vendor = CPUID_VENDOR_INTEL,
1153 .features[FEAT_1_EDX] =
1154 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1155 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1156 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1157 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1158 CPUID_DE | CPUID_FP87,
1159 .features[FEAT_1_ECX] =
1160 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1161 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1162 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1163 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1164 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1165 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1166 .features[FEAT_8000_0001_EDX] =
1167 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1169 .features[FEAT_8000_0001_ECX] =
1170 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1171 .features[FEAT_7_0_EBX] =
1172 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1173 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1174 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1175 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1177 .features[FEAT_XSAVE] =
1178 CPUID_XSAVE_XSAVEOPT,
1179 .features[FEAT_6_EAX] =
1181 .xlevel = 0x80000008,
1182 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1185 .name = "Broadwell",
1187 .vendor = CPUID_VENDOR_INTEL,
1191 .features[FEAT_1_EDX] =
1192 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1193 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1194 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1195 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1196 CPUID_DE | CPUID_FP87,
1197 .features[FEAT_1_ECX] =
1198 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1199 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1200 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1201 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1202 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1203 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1204 .features[FEAT_8000_0001_EDX] =
1205 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1207 .features[FEAT_8000_0001_ECX] =
1208 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1209 .features[FEAT_7_0_EBX] =
1210 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1211 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1212 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1213 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1215 .features[FEAT_XSAVE] =
1216 CPUID_XSAVE_XSAVEOPT,
1217 .features[FEAT_6_EAX] =
1219 .xlevel = 0x80000008,
1220 .model_id = "Intel Core Processor (Broadwell)",
1223 .name = "Opteron_G1",
1225 .vendor = CPUID_VENDOR_AMD,
1229 .features[FEAT_1_EDX] =
1230 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1231 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1232 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1233 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1234 CPUID_DE | CPUID_FP87,
1235 .features[FEAT_1_ECX] =
1237 .features[FEAT_8000_0001_EDX] =
1238 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1239 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1240 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1241 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1242 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1243 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1244 .xlevel = 0x80000008,
1245 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1248 .name = "Opteron_G2",
1250 .vendor = CPUID_VENDOR_AMD,
1254 .features[FEAT_1_EDX] =
1255 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1256 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1257 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1258 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1259 CPUID_DE | CPUID_FP87,
1260 .features[FEAT_1_ECX] =
1261 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1262 /* Missing: CPUID_EXT2_RDTSCP */
1263 .features[FEAT_8000_0001_EDX] =
1264 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1265 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1266 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1267 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1268 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1269 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1270 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1271 .features[FEAT_8000_0001_ECX] =
1272 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1273 .xlevel = 0x80000008,
1274 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1277 .name = "Opteron_G3",
1279 .vendor = CPUID_VENDOR_AMD,
1283 .features[FEAT_1_EDX] =
1284 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1285 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1286 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1287 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1288 CPUID_DE | CPUID_FP87,
1289 .features[FEAT_1_ECX] =
1290 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1292 /* Missing: CPUID_EXT2_RDTSCP */
1293 .features[FEAT_8000_0001_EDX] =
1294 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1295 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1296 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1297 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1298 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1299 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1300 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1301 .features[FEAT_8000_0001_ECX] =
1302 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1303 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1304 .xlevel = 0x80000008,
1305 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1308 .name = "Opteron_G4",
1310 .vendor = CPUID_VENDOR_AMD,
1314 .features[FEAT_1_EDX] =
1315 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1316 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1317 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1318 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1319 CPUID_DE | CPUID_FP87,
1320 .features[FEAT_1_ECX] =
1321 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1322 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1323 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1325 /* Missing: CPUID_EXT2_RDTSCP */
1326 .features[FEAT_8000_0001_EDX] =
1328 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1329 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1330 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1331 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1332 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1333 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1334 .features[FEAT_8000_0001_ECX] =
1335 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1336 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1337 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1340 .xlevel = 0x8000001A,
1341 .model_id = "AMD Opteron 62xx class CPU",
1344 .name = "Opteron_G5",
1346 .vendor = CPUID_VENDOR_AMD,
1350 .features[FEAT_1_EDX] =
1351 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1352 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1353 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1354 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1355 CPUID_DE | CPUID_FP87,
1356 .features[FEAT_1_ECX] =
1357 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1358 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1359 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1360 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1361 /* Missing: CPUID_EXT2_RDTSCP */
1362 .features[FEAT_8000_0001_EDX] =
1364 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1365 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1366 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1367 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1368 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1369 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1370 .features[FEAT_8000_0001_ECX] =
1371 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1372 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1373 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1376 .xlevel = 0x8000001A,
1377 .model_id = "AMD Opteron 63xx class CPU",
1381 typedef struct PropValue {
1382 const char *prop, *value;
1385 /* KVM-specific features that are automatically added/removed
1386 * from all CPU models when KVM is enabled.
1388 static PropValue kvm_default_props[] = {
1389 { "kvmclock", "on" },
1390 { "kvm-nopiodelay", "on" },
1391 { "kvm-asyncpf", "on" },
1392 { "kvm-steal-time", "on" },
1393 { "kvm-pv-eoi", "on" },
1394 { "kvmclock-stable-bit", "on" },
1397 { "monitor", "off" },
1402 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1405 for (pv = kvm_default_props; pv->prop; pv++) {
1406 if (!strcmp(pv->prop, prop)) {
1412 /* It is valid to call this function only for properties that
1413 * are already present in the kvm_default_props table.
1418 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1419 bool migratable_only);
1423 static int cpu_x86_fill_model_id(char *str)
1425 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1428 for (i = 0; i < 3; i++) {
1429 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1430 memcpy(str + i * 16 + 0, &eax, 4);
1431 memcpy(str + i * 16 + 4, &ebx, 4);
1432 memcpy(str + i * 16 + 8, &ecx, 4);
1433 memcpy(str + i * 16 + 12, &edx, 4);
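/* Leaves 0x80000002..0x80000004 each supply 16 bytes of the 48-byte
 * processor brand string. */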
1438 static X86CPUDefinition host_cpudef;
1440 static Property host_x86_cpu_properties[] = {
1441 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1442 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1443 DEFINE_PROP_END_OF_LIST()
1446 /* class_init for the "host" CPU model
1448 * This function may be called before KVM is initialized.
1450 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1452 DeviceClass *dc = DEVICE_CLASS(oc);
1453 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1454 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1456 xcc->kvm_required = true;
1458 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1459 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1461 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1462 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1463 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1464 host_cpudef.stepping = eax & 0x0F;
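/* Example: EAX=0x000306c3 in leaf 1 decodes to family 6 (base 6 + extended 0),
 * model 0x3c (low nibble 0xc | extended 0x30), stepping 3. */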
1466 cpu_x86_fill_model_id(host_cpudef.model_id);
1468 xcc->cpu_def = &host_cpudef;
1470 /* level, xlevel, xlevel2, and the feature words are initialized on
1471 * instance_init, because they require KVM to be initialized.
1474 dc->props = host_x86_cpu_properties;
1475 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1476 dc->cannot_destroy_with_object_finalize_yet = true;
1479 static void host_x86_cpu_initfn(Object *obj)
1481 X86CPU *cpu = X86_CPU(obj);
1482 CPUX86State *env = &cpu->env;
1483 KVMState *s = kvm_state;
1485 assert(kvm_enabled());
1487 /* We can't fill the features array here because we don't know yet if
1488 * "migratable" is true or false.
1490 cpu->host_features = true;
1492 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1493 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1494 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1496 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1499 static const TypeInfo host_x86_cpu_type_info = {
1500 .name = X86_CPU_TYPE_NAME("host"),
1501 .parent = TYPE_X86_CPU,
1502 .instance_init = host_x86_cpu_initfn,
1503 .class_init = host_x86_cpu_class_init,
1508 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1510 FeatureWordInfo *f = &feature_word_info[w];
1513 for (i = 0; i < 32; ++i) {
1514 if ((1UL << i) & mask) {
1515 const char *reg = get_register_name_32(f->cpuid_reg);
1517 fprintf(stderr, "warning: %s doesn't support requested feature: "
1518 "CPUID.%02XH:%s%s%s [bit %d]\n",
1519 kvm_enabled() ? "host" : "TCG",
1521 f->feat_names[i] ? "." : "",
1522 f->feat_names[i] ? f->feat_names[i] : "", i);
1527 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1528 const char *name, void *opaque,
1531 X86CPU *cpu = X86_CPU(obj);
1532 CPUX86State *env = &cpu->env;
1535 value = (env->cpuid_version >> 8) & 0xf;
1537 value += (env->cpuid_version >> 20) & 0xff;
1539 visit_type_int(v, name, &value, errp);
1542 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1543 const char *name, void *opaque,
1546 X86CPU *cpu = X86_CPU(obj);
1547 CPUX86State *env = &cpu->env;
1548 const int64_t min = 0;
1549 const int64_t max = 0xff + 0xf;
1550 Error *local_err = NULL;
1553 visit_type_int(v, name, &value, &local_err);
1555 error_propagate(errp, local_err);
1558 if (value < min || value > max) {
1559 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1560 name ? name : "null", value, min, max);
1564 env->cpuid_version &= ~0xff00f00;
1566 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1568 env->cpuid_version |= value << 8;
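/* e.g. setting family=0x15 stores base family 0xf plus extended family 0x6
 * (0x15 - 0xf) in bits 20-27; smaller values go straight into bits 8-11. */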
1572 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1573 const char *name, void *opaque,
1576 X86CPU *cpu = X86_CPU(obj);
1577 CPUX86State *env = &cpu->env;
1580 value = (env->cpuid_version >> 4) & 0xf;
1581 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1582 visit_type_int(v, name, &value, errp);
1585 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1586 const char *name, void *opaque,
1589 X86CPU *cpu = X86_CPU(obj);
1590 CPUX86State *env = &cpu->env;
1591 const int64_t min = 0;
1592 const int64_t max = 0xff;
1593 Error *local_err = NULL;
1596 visit_type_int(v, name, &value, &local_err);
1598 error_propagate(errp, local_err);
1601 if (value < min || value > max) {
1602 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1603 name ? name : "null", value, min, max);
1607 env->cpuid_version &= ~0xf00f0;
1608 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1611 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1612 const char *name, void *opaque,
1615 X86CPU *cpu = X86_CPU(obj);
1616 CPUX86State *env = &cpu->env;
1619 value = env->cpuid_version & 0xf;
1620 visit_type_int(v, name, &value, errp);
1623 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1624 const char *name, void *opaque,
1627 X86CPU *cpu = X86_CPU(obj);
1628 CPUX86State *env = &cpu->env;
1629 const int64_t min = 0;
1630 const int64_t max = 0xf;
1631 Error *local_err = NULL;
1634 visit_type_int(v, name, &value, &local_err);
1636 error_propagate(errp, local_err);
1639 if (value < min || value > max) {
1640 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1641 name ? name : "null", value, min, max);
1645 env->cpuid_version &= ~0xf;
1646 env->cpuid_version |= value & 0xf;
1649 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1651 X86CPU *cpu = X86_CPU(obj);
1652 CPUX86State *env = &cpu->env;
1655 value = g_malloc(CPUID_VENDOR_SZ + 1);
1656 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1657 env->cpuid_vendor3);
1661 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1664 X86CPU *cpu = X86_CPU(obj);
1665 CPUX86State *env = &cpu->env;
1668 if (strlen(value) != CPUID_VENDOR_SZ) {
1669 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1673 env->cpuid_vendor1 = 0;
1674 env->cpuid_vendor2 = 0;
1675 env->cpuid_vendor3 = 0;
1676 for (i = 0; i < 4; i++) {
1677 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1678 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1679 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1683 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1685 X86CPU *cpu = X86_CPU(obj);
1686 CPUX86State *env = &cpu->env;
1690 value = g_malloc(48 + 1);
1691 for (i = 0; i < 48; i++) {
1692 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1698 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1701 X86CPU *cpu = X86_CPU(obj);
1702 CPUX86State *env = &cpu->env;
1705 if (model_id == NULL) {
1708 len = strlen(model_id);
1709 memset(env->cpuid_model, 0, 48);
1710 for (i = 0; i < 48; i++) {
1714 c = (uint8_t)model_id[i];
1716 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1720 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1721 void *opaque, Error **errp)
1723 X86CPU *cpu = X86_CPU(obj);
1726 value = cpu->env.tsc_khz * 1000;
1727 visit_type_int(v, name, &value, errp);
1730 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1731 void *opaque, Error **errp)
1733 X86CPU *cpu = X86_CPU(obj);
1734 const int64_t min = 0;
1735 const int64_t max = INT64_MAX;
1736 Error *local_err = NULL;
1739 visit_type_int(v, name, &value, &local_err);
1741 error_propagate(errp, local_err);
1744 if (value < min || value > max) {
1745 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1746 name ? name : "null", value, min, max);
1750 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1753 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1754 void *opaque, Error **errp)
1756 X86CPU *cpu = X86_CPU(obj);
1757 int64_t value = cpu->apic_id;
1759 visit_type_int(v, name, &value, errp);
1762 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1763 void *opaque, Error **errp)
1765 X86CPU *cpu = X86_CPU(obj);
1766 DeviceState *dev = DEVICE(obj);
1767 const int64_t min = 0;
1768 const int64_t max = UINT32_MAX;
1769 Error *error = NULL;
1772 if (dev->realized) {
1773 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1774 "it was realized", name, object_get_typename(obj));
1778 visit_type_int(v, name, &value, &error);
1780 error_propagate(errp, error);
1783 if (value < min || value > max) {
1784 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1785 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1786 object_get_typename(obj), name, value, min, max);
1790 if ((value != cpu->apic_id) && cpu_exists(value)) {
1791 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1794 cpu->apic_id = value;
1797 /* Generic getter for "feature-words" and "filtered-features" properties */
1798 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1799 const char *name, void *opaque,
1802 uint32_t *array = (uint32_t *)opaque;
1805 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1806 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1807 X86CPUFeatureWordInfoList *list = NULL;
1809 for (w = 0; w < FEATURE_WORDS; w++) {
1810 FeatureWordInfo *wi = &feature_word_info[w];
1811 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1812 qwi->cpuid_input_eax = wi->cpuid_eax;
1813 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1814 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1815 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1816 qwi->features = array[w];
1818 /* List will be in reverse order, but order shouldn't matter */
1819 list_entries[w].next = list;
1820 list_entries[w].value = &word_infos[w];
1821 list = &list_entries[w];
1824 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
1825 error_propagate(errp, err);
1828 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1829 void *opaque, Error **errp)
1831 X86CPU *cpu = X86_CPU(obj);
1832 int64_t value = cpu->hyperv_spinlock_attempts;
1834 visit_type_int(v, name, &value, errp);
1837 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1838 void *opaque, Error **errp)
1840 const int64_t min = 0xFFF;
1841 const int64_t max = UINT_MAX;
1842 X86CPU *cpu = X86_CPU(obj);
1846 visit_type_int(v, name, &value, &err);
1848 error_propagate(errp, err);
1852 if (value < min || value > max) {
1853 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1854 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1855 object_get_typename(obj), name ? name : "null",
1859 cpu->hyperv_spinlock_attempts = value;
1862 static PropertyInfo qdev_prop_spinlocks = {
1864 .get = x86_get_hv_spinlocks,
1865 .set = x86_set_hv_spinlocks,
1868 /* Convert all '_' in a feature string option name to '-', to make the feature
1869 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
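* For example, "kvm_nopiodelay" becomes "kvm-nopiodelay".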
1871 static inline void feat2prop(char *s)
1873 while ((s = strchr(s, '_'))) {
1878 /* Parse "+feature,-feature,feature=foo" CPU feature string
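* e.g. "qemu64,+avx,-sse3,xlevel=0x80000008" enables AVX, disables SSE3 and
* sets the xlevel property via the handling below.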
1880 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1883 X86CPU *cpu = X86_CPU(cs);
1884 char *featurestr; /* Single "key=value" string being parsed */
1886 /* Features to be added */
1887 FeatureWordArray plus_features = { 0 };
1888 /* Features to be removed */
1889 FeatureWordArray minus_features = { 0 };
1891 CPUX86State *env = &cpu->env;
1892 Error *local_err = NULL;
1894 featurestr = features ? strtok(features, ",") : NULL;
1896 while (featurestr) {
1898 if (featurestr[0] == '+') {
1899 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1900 } else if (featurestr[0] == '-') {
1901 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1902 } else if ((val = strchr(featurestr, '='))) {
1904 feat2prop(featurestr);
1905 if (!strcmp(featurestr, "xlevel")) {
1909 numvalue = strtoul(val, &err, 0);
1910 if (!*val || *err) {
1911 error_setg(errp, "bad numerical value %s", val);
1914 if (numvalue < 0x80000000) {
1915 error_report("xlevel value shall always be >= 0x80000000"
1916 ", fixup will be removed in future versions");
1917 numvalue += 0x80000000;
1919 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1920 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1921 } else if (!strcmp(featurestr, "tsc-freq")) {
1926 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1927 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1928 if (tsc_freq < 0 || *err) {
1929 error_setg(errp, "bad numerical value %s", val);
1932 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1933 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1935 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1937 const int min = 0xFFF;
1939 numvalue = strtoul(val, &err, 0);
1940 if (!*val || *err) {
1941 error_setg(errp, "bad numerical value %s", val);
1944 if (numvalue < min) {
1945 error_report("hv-spinlocks value shall always be >= 0x%x"
1946 ", fixup will be removed in future versions",
1950 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1951 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1953 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1956 feat2prop(featurestr);
1957 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1960 error_propagate(errp, local_err);
1963 featurestr = strtok(NULL, ",");
1966 if (cpu->host_features) {
1967 for (w = 0; w < FEATURE_WORDS; w++) {
1969 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1973 for (w = 0; w < FEATURE_WORDS; w++) {
1974 env->features[w] |= plus_features[w];
1975 env->features[w] &= ~minus_features[w];
1979 /* Print all cpuid feature names in featureset
1981 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1986 for (bit = 0; bit < 32; bit++) {
1987 if (featureset[bit]) {
1988 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1994 /* generate CPU information. */
1995 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1997 X86CPUDefinition *def;
2001 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2002 def = &builtin_x86_defs[i];
2003 snprintf(buf, sizeof(buf), "%s", def->name);
2004 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2007 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2008 "KVM processor with all supported host features "
2009 "(only available in KVM mode)");
2012 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2013 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2014 FeatureWordInfo *fw = &feature_word_info[i];
2016 (*cpu_fprintf)(f, " ");
2017 listflags(f, cpu_fprintf, fw->feat_names);
2018 (*cpu_fprintf)(f, "\n");
2022 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2024 CpuDefinitionInfoList *cpu_list = NULL;
2025 X86CPUDefinition *def;
2028 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2029 CpuDefinitionInfoList *entry;
2030 CpuDefinitionInfo *info;
2032 def = &builtin_x86_defs[i];
2033 info = g_malloc0(sizeof(*info));
2034 info->name = g_strdup(def->name);
2036 entry = g_malloc0(sizeof(*entry));
2037 entry->value = info;
2038 entry->next = cpu_list;
2045 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2046 bool migratable_only)
2048 FeatureWordInfo *wi = &feature_word_info[w];
2051 if (kvm_enabled()) {
2052 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2055 } else if (tcg_enabled()) {
2056 r = wi->tcg_features;
2060 if (migratable_only) {
2061 r &= x86_cpu_get_migratable_flags(w);
2067 * Filters CPU feature words based on host availability of each feature.
2069 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2071 static int x86_cpu_filter_features(X86CPU *cpu)
2073 CPUX86State *env = &cpu->env;
2077 for (w = 0; w < FEATURE_WORDS; w++) {
2078 uint32_t host_feat =
2079 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2080 uint32_t requested_features = env->features[w];
2081 env->features[w] &= host_feat;
2082 cpu->filtered_features[w] = requested_features & ~env->features[w];
2083 if (cpu->filtered_features[w]) {
2084 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2085 report_unavailable_features(w, cpu->filtered_features[w]);
2094 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2097 for (pv = props; pv->prop; pv++) {
2101 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2106 /* Load data from X86CPUDefinition
2108 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2110 CPUX86State *env = &cpu->env;
2112 char host_vendor[CPUID_VENDOR_SZ + 1];
2115 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2116 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2117 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2118 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2119 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2120 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2121 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2122 for (w = 0; w < FEATURE_WORDS; w++) {
2123 env->features[w] = def->features[w];
2126 /* Special cases not set in the X86CPUDefinition structs: */
2127 if (kvm_enabled()) {
2128 x86_cpu_apply_props(cpu, kvm_default_props);
2131 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2133 /* sysenter isn't supported in compatibility mode on AMD,
2134 * syscall isn't supported in compatibility mode on Intel.
2135 * Normally we advertise the actual CPU vendor, but you can
2136 * override this using the 'vendor' property if you want to use
2137 * KVM's sysenter/syscall emulation in compatibility mode and
2138 * when doing cross vendor migration
2140 vendor = def->vendor;
2141 if (kvm_enabled()) {
2142 uint32_t ebx = 0, ecx = 0, edx = 0;
2143 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2144 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2145 vendor = host_vendor;
2148 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2152 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2157 gchar **model_pieces;
2158 char *name, *features;
2159 Error *error = NULL;
2161 model_pieces = g_strsplit(cpu_model, ",", 2);
2162 if (!model_pieces[0]) {
2163 error_setg(&error, "Invalid/empty CPU model name");
2166 name = model_pieces[0];
2167 features = model_pieces[1];
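/* g_strsplit(..., 2) yields at most two pieces, with everything after the
 * first ',' left in the second one: e.g. "Haswell,+vmx,-smap" (illustrative)
 * splits into name "Haswell" and features "+vmx,-smap". */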
2169 oc = x86_cpu_class_by_name(name);
2171 error_setg(&error, "Unable to find CPU definition: %s", name);
2174 xcc = X86_CPU_CLASS(oc);
2176 if (xcc->kvm_required && !kvm_enabled()) {
2177 error_setg(&error, "CPU model '%s' requires KVM", name);
2181 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2183 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2189 if (error != NULL) {
2190 error_propagate(errp, error);
2192 object_unref(OBJECT(cpu));
2196 g_strfreev(model_pieces);
2200 X86CPU *cpu_x86_init(const char *cpu_model)
2202 Error *error = NULL;
2205 cpu = cpu_x86_create(cpu_model, &error);
2210 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2214 error_report_err(error);
2216 object_unref(OBJECT(cpu));
2223 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2225 X86CPUDefinition *cpudef = data;
2226 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2228 xcc->cpu_def = cpudef;
2231 static void x86_register_cpudef_type(X86CPUDefinition *def)
2233 char *typename = x86_cpu_type_name(def->name);
2236 .parent = TYPE_X86_CPU,
2237 .class_init = x86_cpu_cpudef_class_init,
2245 #if !defined(CONFIG_USER_ONLY)
2247 void cpu_clear_apic_feature(CPUX86State *env)
2249 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2252 #endif /* !CONFIG_USER_ONLY */
2254 /* Initialize list of CPU models, filling some non-static fields if necessary
2256 void x86_cpudef_setup(void)
2259 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2261 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2262 X86CPUDefinition *def = &builtin_x86_defs[i];
2264 /* Look for the specific "cpudef" models that have
2265 * the QEMU version in .model_id */
2266 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2267 if (strcmp(model_with_versions[j], def->name) == 0) {
2268 pstrcpy(def->model_id, sizeof(def->model_id),
2269 "QEMU Virtual CPU version ");
2270 pstrcat(def->model_id, sizeof(def->model_id),
2278 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2279 uint32_t *eax, uint32_t *ebx,
2280 uint32_t *ecx, uint32_t *edx)
2282 X86CPU *cpu = x86_env_get_cpu(env);
2283 CPUState *cs = CPU(cpu);
2285 /* test if maximum index reached */
2286 if (index & 0x80000000) {
2287 if (index > env->cpuid_xlevel) {
2288 if (env->cpuid_xlevel2 > 0) {
2289 /* Handle Centaur's extended CPUID leaves (0xC0000000 range). */
2290 if (index > env->cpuid_xlevel2) {
2291 index = env->cpuid_xlevel2;
2292 } else if (index < 0xC0000000) {
2293 index = env->cpuid_xlevel;
2296 /* Intel documentation states that invalid EAX input will
2297 * return the same information as EAX=cpuid_level
2298 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2300 index = env->cpuid_level;
2304 if (index > env->cpuid_level)
2305 index = env->cpuid_level;
2310 *eax = env->cpuid_level;
2311 *ebx = env->cpuid_vendor1;
2312 *edx = env->cpuid_vendor2;
2313 *ecx = env->cpuid_vendor3;
2316 *eax = env->cpuid_version;
2317 *ebx = (cpu->apic_id << 24) |
2318 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
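/* CPUID.01H:EBX[15:8] is the CLFLUSH line size in 8-byte quadwords,
 * so 8 quadwords = 64 bytes, matching the 64-byte line size reported
 * in the cache leaves. */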
2319 *ecx = env->features[FEAT_1_ECX];
2320 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2321 *ecx |= CPUID_EXT_OSXSAVE;
2323 *edx = env->features[FEAT_1_EDX];
2324 if (cs->nr_cores * cs->nr_threads > 1) {
2325 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2330 /* cache info: needed for Pentium Pro compatibility */
2331 if (cpu->cache_info_passthrough) {
2332 host_cpuid(index, 0, eax, ebx, ecx, edx);
2335 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2338 *edx = (L1D_DESCRIPTOR << 16) | \
2339 (L1I_DESCRIPTOR << 8) | \
2343 /* cache info: needed for Core compatibility */
2344 if (cpu->cache_info_passthrough) {
2345 host_cpuid(index, count, eax, ebx, ecx, edx);
2346 *eax &= ~0xFC000000;
2350 case 0: /* L1 dcache info */
2351 *eax |= CPUID_4_TYPE_DCACHE | \
2352 CPUID_4_LEVEL(1) | \
2353 CPUID_4_SELF_INIT_LEVEL;
2354 *ebx = (L1D_LINE_SIZE - 1) | \
2355 ((L1D_PARTITIONS - 1) << 12) | \
2356 ((L1D_ASSOCIATIVITY - 1) << 22);
2357 *ecx = L1D_SETS - 1;
2358 *edx = CPUID_4_NO_INVD_SHARING;
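/* Worked example for the encoding above: EBX = (64 - 1) | (0 << 12) |
 * (7 << 22) = 0x01C0003F, and, assuming L1D_SETS is 64 (to match the
 * 32 KiB size noted with the constants), ECX = 63:
 * 64 B line * 8 ways * 64 sets * 1 partition = 32 KiB. */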
2360 case 1: /* L1 icache info */
2361 *eax |= CPUID_4_TYPE_ICACHE | \
2362 CPUID_4_LEVEL(1) | \
2363 CPUID_4_SELF_INIT_LEVEL;
2364 *ebx = (L1I_LINE_SIZE - 1) | \
2365 ((L1I_PARTITIONS - 1) << 12) | \
2366 ((L1I_ASSOCIATIVITY - 1) << 22);
2367 *ecx = L1I_SETS - 1;
2368 *edx = CPUID_4_NO_INVD_SHARING;
2370 case 2: /* L2 cache info */
2371 *eax |= CPUID_4_TYPE_UNIFIED | \
2372 CPUID_4_LEVEL(2) | \
2373 CPUID_4_SELF_INIT_LEVEL;
2374 if (cs->nr_threads > 1) {
2375 *eax |= (cs->nr_threads - 1) << 14;
2377 *ebx = (L2_LINE_SIZE - 1) | \
2378 ((L2_PARTITIONS - 1) << 12) | \
2379 ((L2_ASSOCIATIVITY - 1) << 22);
2381 *edx = CPUID_4_NO_INVD_SHARING;
2383 default: /* end of info */
2392 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2393 if ((*eax & 31) && cs->nr_cores > 1) {
2394 *eax |= (cs->nr_cores - 1) << 26;
2398 /* mwait info: needed for Core compatibility */
2399 *eax = 0; /* Smallest monitor-line size in bytes */
2400 *ebx = 0; /* Largest monitor-line size in bytes */
2401 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2405 /* Thermal and Power Leaf */
2406 *eax = env->features[FEAT_6_EAX];
2412 /* Structured Extended Feature Flags Enumeration Leaf */
2414 *eax = 0; /* Maximum ECX value for sub-leaves */
2415 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2416 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2417 *edx = 0; /* Reserved */
2426 /* Direct Cache Access Information Leaf */
2427 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2433 /* Architectural Performance Monitoring Leaf */
2434 if (kvm_enabled() && cpu->enable_pmu) {
2435 KVMState *s = cs->kvm_state;
2437 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2438 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2439 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2440 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2449 KVMState *s = cs->kvm_state;
2453 /* Processor Extended State */
2458 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2461 if (kvm_enabled()) {
2462 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2464 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2471 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2472 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2473 if ((env->features[esa->feature] & esa->bits) == esa->bits
2474 && ((ena_mask >> i) & 1) != 0) {
2478 *edx |= 1u << (i - 32);
2480 *ecx = MAX(*ecx, esa->offset + esa->size);
2483 *eax |= ena_mask & (XSTATE_FP | XSTATE_SSE);
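/* Net effect for sub-leaf 0: EAX and EDX hold the low and high 32 bits of
 * the supported XCR0 feature mask, and ECX the XSAVE buffer size needed to
 * cover every supported component (offset + size of the highest one). */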
2485 } else if (count == 1) {
2486 *eax = env->features[FEAT_XSAVE];
2487 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2488 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2489 if ((env->features[esa->feature] & esa->bits) == esa->bits
2490 && ((ena_mask >> count) & 1) != 0) {
2498 *eax = env->cpuid_xlevel;
2499 *ebx = env->cpuid_vendor1;
2500 *edx = env->cpuid_vendor2;
2501 *ecx = env->cpuid_vendor3;
2504 *eax = env->cpuid_version;
2506 *ecx = env->features[FEAT_8000_0001_ECX];
2507 *edx = env->features[FEAT_8000_0001_EDX];
2509 /* The Linux kernel checks for the CMPLegacy bit and
2510 * discards multiple thread information if it is set.
2511 * So don't set it here for Intel, to keep Linux guests happy.
2513 if (cs->nr_cores * cs->nr_threads > 1) {
2514 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2515 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2516 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2517 *ecx |= 1 << 1; /* CmpLegacy bit */
2524 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2525 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2526 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2527 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2530 /* cache info (L1 cache) */
2531 if (cpu->cache_info_passthrough) {
2532 host_cpuid(index, 0, eax, ebx, ecx, edx);
2535 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2536 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2537 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2538 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2539 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2540 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2541 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2542 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2545 /* cache info (L2 cache) */
2546 if (cpu->cache_info_passthrough) {
2547 host_cpuid(index, 0, eax, ebx, ecx, edx);
2550 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2551 (L2_DTLB_2M_ENTRIES << 16) | \
2552 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2553 (L2_ITLB_2M_ENTRIES);
2554 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2555 (L2_DTLB_4K_ENTRIES << 16) | \
2556 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2557 (L2_ITLB_4K_ENTRIES);
2558 *ecx = (L2_SIZE_KB_AMD << 16) | \
2559 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2560 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2561 *edx = ((L3_SIZE_KB/512) << 18) | \
2562 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2563 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
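/* Worked example for ECX above: 512 KiB, 16-way, 1 line/tag, 64-byte lines
 * give (512 << 16) | (8 << 12) | (1 << 8) | 64 = 0x02008140, assuming
 * AMD_ENC_ASSOC(16) == 8 per AMD's L2/L3 associativity encoding. */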
2569 *edx = env->features[FEAT_8000_0007_EDX];
2572 /* virtual & phys address size in low 2 bytes. */
2573 /* XXX: This value must match the one used in the MMU code. */
2574 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2575 /* 64 bit processor */
2576 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2577 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2579 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2580 *eax = 0x00000024; /* 36 bits physical */
2582 *eax = 0x00000020; /* 32 bits physical */
2588 if (cs->nr_cores * cs->nr_threads > 1) {
2589 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2593 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2594 *eax = 0x00000001; /* SVM Revision */
2595 *ebx = 0x00000010; /* nr of ASIDs */
2597 *edx = env->features[FEAT_SVM]; /* optional features */
2606 *eax = env->cpuid_xlevel2;
2612 /* Support for VIA CPUs' CPUID instruction */
2613 *eax = env->cpuid_version;
2616 *edx = env->features[FEAT_C000_0001_EDX];
2621 /* Reserved for future use; currently filled with zeroes */
2628 /* reserved values: zero */
2637 /* CPUClass::reset() */
2638 static void x86_cpu_reset(CPUState *s)
2640 X86CPU *cpu = X86_CPU(s);
2641 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2642 CPUX86State *env = &cpu->env;
2647 xcc->parent_reset(s);
2649 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2653 env->old_exception = -1;
2655 /* init to reset state */
2657 #ifdef CONFIG_SOFTMMU
2658 env->hflags |= HF_SOFTMMU_MASK;
2660 env->hflags2 |= HF2_GIF_MASK;
2662 cpu_x86_update_cr0(env, 0x60000010);
2663 env->a20_mask = ~0x0;
2664 env->smbase = 0x30000;
2666 env->idt.limit = 0xffff;
2667 env->gdt.limit = 0xffff;
2668 env->ldt.limit = 0xffff;
2669 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2670 env->tr.limit = 0xffff;
2671 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2673 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2674 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2675 DESC_R_MASK | DESC_A_MASK);
2676 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2677 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2679 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2680 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2682 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2683 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2685 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2686 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2688 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2689 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2693 env->regs[R_EDX] = env->cpuid_version;
2698 for (i = 0; i < 8; i++) {
2701 cpu_set_fpuc(env, 0x37f);
2703 env->mxcsr = 0x1f80;
2704 /* All units are in INIT state. */
2707 env->pat = 0x0007040600070406ULL;
2708 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2710 memset(env->dr, 0, sizeof(env->dr));
2711 env->dr[6] = DR6_FIXED_1;
2712 env->dr[7] = DR7_FIXED_1;
2713 cpu_breakpoint_remove_all(s, BP_CPU);
2714 cpu_watchpoint_remove_all(s, BP_CPU);
2719 #ifdef CONFIG_USER_ONLY
2720 /* Enable all the features for user-mode. */
2721 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2724 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_MPX) {
2725 xcr0 |= XSTATE_BNDREGS | XSTATE_BNDCSR;
2727 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2728 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2730 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2731 cr4 |= CR4_FSGSBASE_MASK;
2736 cpu_x86_update_cr4(env, cr4);
2739 * SDM 11.11.5 requires:
2740 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2741 * - IA32_MTRR_PHYSMASKn.V = 0
2742 * All other bits are undefined. For simplification, zero it all.
2744 env->mtrr_deftype = 0;
2745 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2746 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2748 #if !defined(CONFIG_USER_ONLY)
2749 /* We hard-wire the BSP to the first CPU. */
2750 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2752 s->halted = !cpu_is_bsp(cpu);
2754 if (kvm_enabled()) {
2755 kvm_arch_reset_vcpu(cpu);
2760 #ifndef CONFIG_USER_ONLY
2761 bool cpu_is_bsp(X86CPU *cpu)
2763 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2766 /* TODO: remove me when reset over the QOM tree is implemented */
2767 static void x86_cpu_machine_reset_cb(void *opaque)
2769 X86CPU *cpu = opaque;
2770 cpu_reset(CPU(cpu));
2774 static void mce_init(X86CPU *cpu)
2776 CPUX86State *cenv = &cpu->env;
2779 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2780 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2781 (CPUID_MCE | CPUID_MCA)) {
2782 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2783 cenv->mcg_ctl = ~(uint64_t)0;
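/* Each bank occupies four consecutive slots in mce_banks[] (CTL, STATUS,
 * ADDR, MISC), so bank N's MCi_CTL lives at index N * 4; setting it to all
 * ones enables reporting of every error type in that bank. */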
2784 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2785 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2790 #ifndef CONFIG_USER_ONLY
2791 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2793 APICCommonState *apic;
2794 const char *apic_type = "apic";
2796 if (kvm_apic_in_kernel()) {
2797 apic_type = "kvm-apic";
2798 } else if (xen_enabled()) {
2799 apic_type = "xen-apic";
2802 cpu->apic_state = DEVICE(object_new(apic_type));
2804 object_property_add_child(OBJECT(cpu), "apic",
2805 OBJECT(cpu->apic_state), NULL);
2806 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2807 /* TODO: convert to link<> */
2808 apic = APIC_COMMON(cpu->apic_state);
2810 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2813 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2815 APICCommonState *apic;
2816 static bool apic_mmio_map_once;
2818 if (cpu->apic_state == NULL) {
2821 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2824 /* Map APIC MMIO area */
2825 apic = APIC_COMMON(cpu->apic_state);
2826 if (!apic_mmio_map_once) {
2827 memory_region_add_subregion_overlap(get_system_memory(),
2829 MSR_IA32_APICBASE_BASE,
2832 apic_mmio_map_once = true;
2836 static void x86_cpu_machine_done(Notifier *n, void *unused)
2838 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2839 MemoryRegion *smram =
2840 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2843 cpu->smram = g_new(MemoryRegion, 1);
2844 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2845 smram, 0, 1ull << 32);
2846 memory_region_set_enabled(cpu->smram, false);
2847 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2851 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2857 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2858 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2859 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2860 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2861 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2862 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2863 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2865 CPUState *cs = CPU(dev);
2866 X86CPU *cpu = X86_CPU(dev);
2867 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2868 CPUX86State *env = &cpu->env;
2869 Error *local_err = NULL;
2870 static bool ht_warned;
2872 if (cpu->apic_id < 0) {
2873 error_setg(errp, "apic-id property was not initialized properly");
2877 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2878 env->cpuid_level = 7;
2881 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2884 if (IS_AMD_CPU(env)) {
2885 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2886 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2887 & CPUID_EXT2_AMD_ALIASES);
2891 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2892 error_setg(&local_err,
2894 "Host doesn't support requested features" :
2895 "TCG doesn't support requested features");
2899 #ifndef CONFIG_USER_ONLY
2900 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2902 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2903 x86_cpu_apic_create(cpu, &local_err);
2904 if (local_err != NULL) {
2912 #ifndef CONFIG_USER_ONLY
2913 if (tcg_enabled()) {
2914 AddressSpace *newas = g_new(AddressSpace, 1);
2916 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2917 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2919 /* Outer container... */
2920 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2921 memory_region_set_enabled(cpu->cpu_as_root, true);
2923 /* ... with two regions inside: normal system memory with low
2926 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2927 get_system_memory(), 0, ~0ull);
2928 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2929 memory_region_set_enabled(cpu->cpu_as_mem, true);
2930 address_space_init(newas, cpu->cpu_as_root, "CPU");
2932 cpu_address_space_init(cs, newas, 0);
2934 /* ... SMRAM with higher priority, linked from /machine/smram. */
2935 cpu->machine_done.notify = x86_cpu_machine_done;
2936 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2942 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2943 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2944 * based on inputs (sockets, cores, threads), it is still better to give users a warning.
2947 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2948 * cs->nr_threads hasn't been populated yet and the check would be incorrect.
2950 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2951 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2952 " -smp options properly.");
2956 x86_cpu_apic_realize(cpu, &local_err);
2957 if (local_err != NULL) {
2962 xcc->parent_realize(dev, &local_err);
2965 if (local_err != NULL) {
2966 error_propagate(errp, local_err);
2971 typedef struct BitProperty {
2976 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
2977 void *opaque, Error **errp)
2979 BitProperty *fp = opaque;
2980 bool value = (*fp->ptr & fp->mask) == fp->mask;
2981 visit_type_bool(v, name, &value, errp);
2984 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
2985 void *opaque, Error **errp)
2987 DeviceState *dev = DEVICE(obj);
2988 BitProperty *fp = opaque;
2989 Error *local_err = NULL;
2992 if (dev->realized) {
2993 qdev_prop_set_after_realize(dev, name, errp);
2997 visit_type_bool(v, name, &value, &local_err);
2999 error_propagate(errp, local_err);
3004 *fp->ptr |= fp->mask;
3006 *fp->ptr &= ~fp->mask;
3010 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3013 BitProperty *prop = opaque;
3017 /* Register a boolean property to get/set a single bit in a uint32_t field.
3019 * The same property name can be registered multiple times to make it affect
3020 * multiple bits in the same FeatureWord. In that case, the getter will return
3021 * true only if all bits are set.
3023 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3024 const char *prop_name,
3030 uint32_t mask = (1UL << bitnr);
3032 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3035 assert(fp->ptr == field);
3038 fp = g_new0(BitProperty, 1);
3041 object_property_add(OBJECT(cpu), prop_name, "bool",
3042 x86_cpu_get_bit_prop,
3043 x86_cpu_set_bit_prop,
3044 x86_cpu_release_bit_prop, fp, &error_abort);
3048 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3052 Object *obj = OBJECT(cpu);
3055 FeatureWordInfo *fi = &feature_word_info[w];
3057 if (!fi->feat_names) {
3060 if (!fi->feat_names[bitnr]) {
3064 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3066 feat2prop(names[0]);
3067 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
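/* A feature name may list alternate spellings separated by '|': the first
 * becomes the canonical property (registered above) and the rest are added
 * as aliases in the loop below; e.g. an entry "sse4.2|sse4_2" (illustrative)
 * yields the property "sse4.2" plus the alias "sse4-2" after feat2prop(). */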
3069 for (i = 1; names[i]; i++) {
3070 feat2prop(names[i]);
3071 object_property_add_alias(obj, names[i], obj, names[0],
3078 static void x86_cpu_initfn(Object *obj)
3080 CPUState *cs = CPU(obj);
3081 X86CPU *cpu = X86_CPU(obj);
3082 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3083 CPUX86State *env = &cpu->env;
3088 cpu_exec_init(cs, &error_abort);
3090 object_property_add(obj, "family", "int",
3091 x86_cpuid_version_get_family,
3092 x86_cpuid_version_set_family, NULL, NULL, NULL);
3093 object_property_add(obj, "model", "int",
3094 x86_cpuid_version_get_model,
3095 x86_cpuid_version_set_model, NULL, NULL, NULL);
3096 object_property_add(obj, "stepping", "int",
3097 x86_cpuid_version_get_stepping,
3098 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3099 object_property_add_str(obj, "vendor",
3100 x86_cpuid_get_vendor,
3101 x86_cpuid_set_vendor, NULL);
3102 object_property_add_str(obj, "model-id",
3103 x86_cpuid_get_model_id,
3104 x86_cpuid_set_model_id, NULL);
3105 object_property_add(obj, "tsc-frequency", "int",
3106 x86_cpuid_get_tsc_freq,
3107 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3108 object_property_add(obj, "apic-id", "int",
3109 x86_cpuid_get_apic_id,
3110 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3111 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3112 x86_cpu_get_feature_words,
3113 NULL, NULL, (void *)env->features, NULL);
3114 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3115 x86_cpu_get_feature_words,
3116 NULL, NULL, (void *)cpu->filtered_features, NULL);
3118 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3120 #ifndef CONFIG_USER_ONLY
3121 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3125 for (w = 0; w < FEATURE_WORDS; w++) {
3128 for (bitnr = 0; bitnr < 32; bitnr++) {
3129 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3133 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3135 /* init various static tables used in TCG mode */
3136 if (tcg_enabled() && !inited) {
3142 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3144 X86CPU *cpu = X86_CPU(cs);
3146 return cpu->apic_id;
3149 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3151 X86CPU *cpu = X86_CPU(cs);
3153 return cpu->env.cr[0] & CR0_PG_MASK;
3156 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3158 X86CPU *cpu = X86_CPU(cs);
3160 cpu->env.eip = value;
3163 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3165 X86CPU *cpu = X86_CPU(cs);
3167 cpu->env.eip = tb->pc - tb->cs_base;
3170 static bool x86_cpu_has_work(CPUState *cs)
3172 X86CPU *cpu = X86_CPU(cs);
3173 CPUX86State *env = &cpu->env;
3175 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3176 CPU_INTERRUPT_POLL)) &&
3177 (env->eflags & IF_MASK)) ||
3178 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3179 CPU_INTERRUPT_INIT |
3180 CPU_INTERRUPT_SIPI |
3181 CPU_INTERRUPT_MCE)) ||
3182 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3183 !(env->hflags & HF_SMM_MASK));
3186 static Property x86_cpu_properties[] = {
3187 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3188 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3189 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3190 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3191 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3192 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3193 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3194 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3195 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3196 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3197 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3198 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3199 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3200 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3201 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3202 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3203 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3204 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3205 DEFINE_PROP_END_OF_LIST()
3208 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3210 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3211 CPUClass *cc = CPU_CLASS(oc);
3212 DeviceClass *dc = DEVICE_CLASS(oc);
3214 xcc->parent_realize = dc->realize;
3215 dc->realize = x86_cpu_realizefn;
3216 dc->props = x86_cpu_properties;
3218 xcc->parent_reset = cc->reset;
3219 cc->reset = x86_cpu_reset;
3220 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3222 cc->class_by_name = x86_cpu_class_by_name;
3223 cc->parse_features = x86_cpu_parse_featurestr;
3224 cc->has_work = x86_cpu_has_work;
3225 cc->do_interrupt = x86_cpu_do_interrupt;
3226 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3227 cc->dump_state = x86_cpu_dump_state;
3228 cc->set_pc = x86_cpu_set_pc;
3229 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3230 cc->gdb_read_register = x86_cpu_gdb_read_register;
3231 cc->gdb_write_register = x86_cpu_gdb_write_register;
3232 cc->get_arch_id = x86_cpu_get_arch_id;
3233 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3234 #ifdef CONFIG_USER_ONLY
3235 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3237 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3238 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3239 cc->write_elf64_note = x86_cpu_write_elf64_note;
3240 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3241 cc->write_elf32_note = x86_cpu_write_elf32_note;
3242 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3243 cc->vmsd = &vmstate_x86_cpu;
3245 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3246 #ifndef CONFIG_USER_ONLY
3247 cc->debug_excp_handler = breakpoint_handler;
3249 cc->cpu_exec_enter = x86_cpu_exec_enter;
3250 cc->cpu_exec_exit = x86_cpu_exec_exit;
3253 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3254 * object in cpus -> dangling pointer after final object_unref().
3256 dc->cannot_destroy_with_object_finalize_yet = true;
3259 static const TypeInfo x86_cpu_type_info = {
3260 .name = TYPE_X86_CPU,
3262 .instance_size = sizeof(X86CPU),
3263 .instance_init = x86_cpu_initfn,
3265 .class_size = sizeof(X86CPUClass),
3266 .class_init = x86_cpu_common_class_init,
3269 static void x86_cpu_register_types(void)
3273 type_register_static(&x86_cpu_type_info);
3274 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3275 x86_register_cpudef_type(&builtin_x86_defs[i]);
3278 type_register_static(&host_x86_cpu_type_info);
3282 type_init(x86_cpu_register_types)