/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */
#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B 0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d

/* CPUID Leaf 4 constants: */

/* EAX: cache type, level, and self-initializing/fully-associative flags */
#define CPUID_4_TYPE_DCACHE 1
#define CPUID_4_TYPE_ICACHE 2
#define CPUID_4_TYPE_UNIFIED 3
/* cache level is encoded in EAX bits 7:5 */
#define CPUID_4_LEVEL(l) ((l) << 5)
#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC (1 << 9)

/* EDX: sharing/inclusiveness/indexing flags */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE (1 << 1)
#define CPUID_4_COMPLEX_IDX (1 << 2)
/* Marker for a fully-associative cache in the AMD CPUID leaves: */
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a ways-of-associativity count to the 4-bit field encoding
 * defined by the AMD CPUID specification.  The visible excerpt had
 * lost the intermediate mappings (2..128 ways), which made every
 * common associativity encode as 0 ("invalid"); restored here.
 * Arguments are parenthesized for macro hygiene.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a)  : \
                          (a) ==   2 ? 0x2  : \
                          (a) ==   4 ? 0x4  : \
                          (a) ==   8 ? 0x6  : \
                          (a) ==  16 ? 0x8  : \
                          (a) ==  32 ? 0xA  : \
                          (a) ==  48 ? 0xB  : \
                          (a) ==  64 ? 0xC  : \
                          (a) ==  96 ? 0xD  : \
                          (a) == 128 ? 0xE  : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE 64
#define L1D_ASSOCIATIVITY 8
#define L1D_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG 1
#define L1D_SIZE_KB_AMD 64
#define L1D_ASSOCIATIVITY_AMD 2

/* L1 instruction cache: */
#define L1I_LINE_SIZE 64
#define L1I_ASSOCIATIVITY 8
#define L1I_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG 1
#define L1I_SIZE_KB_AMD 64
#define L1I_ASSOCIATIVITY_AMD 2

/* Level 2 unified cache: */
#define L2_LINE_SIZE 64
#define L2_ASSOCIATIVITY 16
#define L2_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG 1
#define L2_SIZE_KB_AMD 512

/* Level 3 unified cache: */
/* plain L3_* values: L3 reported as disabled */
#define L3_SIZE_KB 0 /* disabled */
#define L3_ASSOCIATIVITY 0 /* disabled */
#define L3_LINES_PER_TAG 0 /* disabled */
#define L3_LINE_SIZE 0 /* disabled */
/* L3_N_* values: a 16MiB 16-way L3 — presumably used by newer machine
 * types; TODO confirm against the consumers of these macros. */
#define L3_N_LINE_SIZE 64
#define L3_N_ASSOCIATIVITY 16
#define L3_N_SETS 16384
#define L3_N_PARTITIONS 1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG 1
#define L3_N_SIZE_KB_AMD 16384

/* TLB definitions: */
#define L1_DTLB_2M_ASSOC 1
#define L1_DTLB_2M_ENTRIES 255
#define L1_DTLB_4K_ASSOC 1
#define L1_DTLB_4K_ENTRIES 255

#define L1_ITLB_2M_ASSOC 1
#define L1_ITLB_2M_ENTRIES 255
#define L1_ITLB_4K_ASSOC 1
#define L1_ITLB_4K_ENTRIES 255

#define L2_DTLB_2M_ASSOC 0 /* disabled */
#define L2_DTLB_2M_ENTRIES 0 /* disabled */
#define L2_DTLB_4K_ASSOC 4
#define L2_DTLB_4K_ENTRIES 512

#define L2_ITLB_2M_ASSOC 0 /* disabled */
#define L2_ITLB_2M_ENTRIES 0 /* disabled */
#define L2_ITLB_4K_ASSOC 4
#define L2_ITLB_4K_ENTRIES 512
172 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
173 uint32_t vendor2, uint32_t vendor3)
176 for (i = 0; i < 4; i++) {
177 dst[i] = vendor1 >> (8 * i);
178 dst[i + 4] = vendor2 >> (8 * i);
179 dst[i + 8] = vendor3 >> (8 * i);
181 dst[CPUID_VENDOR_SZ] = '\0';
/* Baseline CPUID.1:EDX feature sets for the classic built-in CPU models;
 * each generation extends the previous one. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* CPUID.1:EDX bits the TCG emulator supports: */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
/* partly implemented:
   CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
/* missing:
   CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
/* CPUID.1:ECX bits the TCG emulator supports: */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
/* missing:
   CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
   CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
   CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
   CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
   CPUID_EXT_F16C, CPUID_EXT_RDRAND */
/* Long-mode-only bits are advertised by TCG only when the target is
 * x86_64; the excerpt had both definitions unguarded, which is a macro
 * redefinition error — restore the #ifdef TARGET_X86_64 guard. */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif
/* TCG-supported bits for the remaining feature words.  The excerpt had
 * left TCG_7_0_EBX_FEATURES unterminated (its last continuation line ran
 * into comment text) and had dropped two comment openers; restored. */
#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* NOTE(review): final term CPUID_7_0_EBX_ERMS reconstructed from
           * upstream; the original closing line was lost — verify. */
/* missing:
   CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
   CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
   CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
/* missing:
   CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Describes one 32-bit CPUID feature word: where it comes from
 * (leaf/sub-leaf/register), the per-bit flag names, and which bits TCG
 * supports or are (un)migratable.  Restored the comment terminator and
 * the "} FeatureWordInfo;" closer lost from the excerpt. */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;
/* Table mapping each FeatureWord to its CPUID leaf/register, the flag name
 * for every bit (NULL = reserved/unnamed; "a|b" strings list accepted
 * aliases), and the subset of bits TCG supports.
 * NOTE(review): several structural initializer lines (entry headers such as
 * "[FEAT_1_EDX] = { .feat_names = {" and their matching "}," closers, plus
 * the final "};") appear to have been lost from this excerpt — restore them
 * against upstream before this table can compile.
 */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
        /* CPUID.1:EDX flag names, one per bit: */
        "fpu", "vme", "de", "pse",
        "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep",
        "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
        NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss",
        "ht" /* Intel htt */, "tm", "ia64", "pbe",
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
        /* CPUID.1:ECX flag names: */
        "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
        "ds_cpl", "vmx", "smx", "est",
        "tm2", "ssse3", "cid", NULL,
        "fma", "cx16", "xtpr", "pdcm",
        NULL, "pcid", "dca", "sse4.1|sse4_1",
        "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
        "tsc-deadline", "aes", "xsave", "osxsave",
        "avx", "f16c", "rdrand", "hypervisor",
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
        NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
        NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
        NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
        NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
        "nx|xd", NULL, "mmxext", NULL /* mmx */,
        NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb", "rdtscp",
        NULL, "lm|i64", "3dnowext", "3dnow",
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    [FEAT_8000_0001_ECX] = {
        "lahf_lm", "cmp_legacy", "svm", "extapic",
        "cr8legacy", "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", "ibs", "xop",
        "skinit", "wdt", NULL, "lwp",
        "fma4", "tce", NULL, "nodeid_msr",
        NULL, "tbm", "topoext", "perfctr_core",
        "perfctr_nb", NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    [FEAT_C000_0001_EDX] = {
        NULL, NULL, "xstore", "xstore-en",
        NULL, NULL, "xcrypt", "xcrypt-en",
        "ace2", "ace2-en", "phe", "phe-en",
        "pmm", "pmm-en", NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
        /* KVM paravirtual feature names; "kvmclock" intentionally appears
         * for two bits — presumably the two clocksource bits; verify. */
        "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
        "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        "kvmclock-stable-bit", NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    [FEAT_HYPERV_EAX] = {
        NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
        NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
        NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
        NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
        NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
        NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    [FEAT_HYPERV_EBX] = {
        NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
        NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
        NULL /* hv_post_messages */, NULL /* hv_signal_events */,
        NULL /* hv_create_port */, NULL /* hv_connect_port */,
        NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
        NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    [FEAT_HYPERV_EDX] = {
        NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
        NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
        NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
        NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
        /* SVM (CPUID 0x8000000A:EDX) flag names: */
        "npt", "lbrv", "svm_lock", "nrip_save",
        "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
        NULL, NULL, "pause_filter", NULL,
        "pfthreshold", NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
        /* CPUID.7.0:EBX flag names: */
        "fsgsbase", "tsc_adjust", NULL, "bmi1",
        "hle", "avx2", NULL, "smep",
        "bmi2", "erms", "invpcid", "rtm",
        NULL, NULL, "mpx", NULL,
        "avx512f", "avx512dq", "rdseed", "adx",
        "smap", "avx512ifma", "pcommit", "clflushopt",
        "clwb", NULL, "avx512pf", "avx512er",
        "avx512cd", NULL, "avx512bw", "avx512vl",
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .tcg_features = TCG_7_0_EBX_FEATURES,
        /* CPUID.7.0:ECX flag names: */
        NULL, "avx512vbmi", "umip", "pku",
        "ospke", NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, "rdpid", NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    [FEAT_8000_0007_EDX] = {
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        "invtsc", NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        .cpuid_eax = 0x80000007,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
        /* XSAVE sub-leaf (CPUID 0xD, ECX=1) flag names: */
        "xsaveopt", "xsavec", "xgetbv1", "xsaves",
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .tcg_features = TCG_XSAVE_FEATURES,
        /* CPUID.6:EAX flag names: */
        NULL, NULL, "arat", NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
511 typedef struct X86RegisterInfo32 {
512 /* Name of register */
514 /* QAPI enum value register */
515 X86CPURegister32 qapi_enum;
518 #define REGISTER(reg) \
519 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
520 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
532 typedef struct ExtSaveArea {
533 uint32_t feature, bits;
534 uint32_t offset, size;
537 static const ExtSaveArea x86_ext_save_areas[] = {
539 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
540 .offset = offsetof(X86XSaveArea, avx_state),
541 .size = sizeof(XSaveAVX) },
542 [XSTATE_BNDREGS_BIT] =
543 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
544 .offset = offsetof(X86XSaveArea, bndreg_state),
545 .size = sizeof(XSaveBNDREG) },
546 [XSTATE_BNDCSR_BIT] =
547 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
548 .offset = offsetof(X86XSaveArea, bndcsr_state),
549 .size = sizeof(XSaveBNDCSR) },
550 [XSTATE_OPMASK_BIT] =
551 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
552 .offset = offsetof(X86XSaveArea, opmask_state),
553 .size = sizeof(XSaveOpmask) },
554 [XSTATE_ZMM_Hi256_BIT] =
555 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
556 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
557 .size = sizeof(XSaveZMM_Hi256) },
558 [XSTATE_Hi16_ZMM_BIT] =
559 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
560 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
561 .size = sizeof(XSaveHi16_ZMM) },
563 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
564 .offset = offsetof(X86XSaveArea, pkru_state),
565 .size = sizeof(XSavePKRU) },
568 static uint32_t xsave_area_size(uint64_t mask)
571 uint64_t ret = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader);
573 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
574 const ExtSaveArea *esa = &x86_ext_save_areas[i];
575 if ((mask >> i) & 1) {
576 ret = MAX(ret, esa->offset + esa->size);
582 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
584 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
585 cpu->env.features[FEAT_XSAVE_COMP_LO];
588 const char *get_register_name_32(unsigned int reg)
590 if (reg >= CPU_NB_REGS32) {
593 return x86_reg_info_32[reg].name;
597 * Returns the set of feature flags that are supported and migratable by
598 * QEMU, for a given FeatureWord.
600 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
602 FeatureWordInfo *wi = &feature_word_info[w];
606 for (i = 0; i < 32; i++) {
607 uint32_t f = 1U << i;
609 /* If the feature name is known, it is implicitly considered migratable,
610 * unless it is explicitly set in unmigratable_flags */
611 if ((wi->migratable_flags & f) ||
612 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
619 void host_cpuid(uint32_t function, uint32_t count,
620 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
626 : "=a"(vec[0]), "=b"(vec[1]),
627 "=c"(vec[2]), "=d"(vec[3])
628 : "0"(function), "c"(count) : "cc");
629 #elif defined(__i386__)
630 asm volatile("pusha \n\t"
632 "mov %%eax, 0(%2) \n\t"
633 "mov %%ebx, 4(%2) \n\t"
634 "mov %%ecx, 8(%2) \n\t"
635 "mov %%edx, 12(%2) \n\t"
637 : : "a"(function), "c"(count), "S"(vec)
/* True for characters treated as "white" by the feature-string parser:
 * any non-NUL character at or below space, or above '~' (i.e. anything
 * outside the printable ASCII range); NUL itself returns false. */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
 * a substring. ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 * (Restored the braces and loop structure lost from the excerpt.)
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    for (;;) {
        if (!*s1 || !*s2 || *s1 != *s2)
            return *s1 - *s2;
        ++s1, ++s2;
        if (s1 == e1 && s2 == e2)
            return 0;
        else if (s1 == e1)
            return *s2;
        else if (s2 == e2)
            return *s1;
    }
}
/* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right. Return 0 for success,
 * non-zero otherwise.
 * (Restored the braces, local declarations and returns lost from the
 * excerpt.)
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *p, *q;

    for (q = p = altstr; ; ) {
        while (*p && *p != '|')
            ++p;
        /* [q..p) is the current alternative; empty matches empty s */
        if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
            return 0;
        if (!*p)
            return 1;
        else
            q = ++p;
    }
}
/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false
 * (Restored the braces, locals and return lost from the excerpt.)
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    uint32_t mask;
    const char **ppc;
    bool found = false;

    /* walk all 32 bit positions; a flag may match more than one name */
    for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
        if (*ppc && !altcmp(s, e, *ppc)) {
            *pval |= mask;
            found = true;
        }
    }
    return found;
}
716 static void add_flagname_to_bitmaps(const char *flagname,
717 FeatureWordArray words,
721 for (w = 0; w < FEATURE_WORDS; w++) {
722 FeatureWordInfo *wi = &feature_word_info[w];
723 if (lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
727 if (w == FEATURE_WORDS) {
728 error_setg(errp, "CPU feature %s not found", flagname);
732 /* CPU class name definitions: */
734 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
735 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
737 /* Return type name for a given CPU model name
738 * Caller is responsible for freeing the returned string.
740 static char *x86_cpu_type_name(const char *model_name)
742 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
745 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
750 if (cpu_model == NULL) {
754 typename = x86_cpu_type_name(cpu_model);
755 oc = object_class_by_name(typename);
760 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
762 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
763 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
764 return g_strndup(class_name,
765 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
768 struct X86CPUDefinition {
772 /* vendor is zero-terminated, 12 character ASCII string */
773 char vendor[CPUID_VENDOR_SZ + 1];
777 FeatureWordArray features;
781 static X86CPUDefinition builtin_x86_defs[] = {
785 .vendor = CPUID_VENDOR_AMD,
789 .features[FEAT_1_EDX] =
791 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
793 .features[FEAT_1_ECX] =
794 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
795 .features[FEAT_8000_0001_EDX] =
796 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
797 .features[FEAT_8000_0001_ECX] =
798 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
799 .xlevel = 0x8000000A,
800 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
805 .vendor = CPUID_VENDOR_AMD,
809 /* Missing: CPUID_HT */
810 .features[FEAT_1_EDX] =
812 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
813 CPUID_PSE36 | CPUID_VME,
814 .features[FEAT_1_ECX] =
815 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
817 .features[FEAT_8000_0001_EDX] =
818 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
819 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
820 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
821 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
823 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
824 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
825 .features[FEAT_8000_0001_ECX] =
826 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
827 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
828 /* Missing: CPUID_SVM_LBRV */
829 .features[FEAT_SVM] =
831 .xlevel = 0x8000001A,
832 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
837 .vendor = CPUID_VENDOR_INTEL,
841 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
842 .features[FEAT_1_EDX] =
844 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
845 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
846 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
847 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
848 .features[FEAT_1_ECX] =
849 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
851 .features[FEAT_8000_0001_EDX] =
852 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
853 .features[FEAT_8000_0001_ECX] =
855 .xlevel = 0x80000008,
856 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
861 .vendor = CPUID_VENDOR_INTEL,
865 /* Missing: CPUID_HT */
866 .features[FEAT_1_EDX] =
867 PPRO_FEATURES | CPUID_VME |
868 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
870 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
871 .features[FEAT_1_ECX] =
872 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
873 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
874 .features[FEAT_8000_0001_EDX] =
875 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
876 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
877 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
878 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
879 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
880 .features[FEAT_8000_0001_ECX] =
882 .xlevel = 0x80000008,
883 .model_id = "Common KVM processor"
888 .vendor = CPUID_VENDOR_INTEL,
892 .features[FEAT_1_EDX] =
894 .features[FEAT_1_ECX] =
896 .xlevel = 0x80000004,
897 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
902 .vendor = CPUID_VENDOR_INTEL,
906 .features[FEAT_1_EDX] =
907 PPRO_FEATURES | CPUID_VME |
908 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
909 .features[FEAT_1_ECX] =
911 .features[FEAT_8000_0001_ECX] =
913 .xlevel = 0x80000008,
914 .model_id = "Common 32-bit KVM processor"
919 .vendor = CPUID_VENDOR_INTEL,
923 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
924 .features[FEAT_1_EDX] =
925 PPRO_FEATURES | CPUID_VME |
926 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
928 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
929 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
930 .features[FEAT_1_ECX] =
931 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
932 .features[FEAT_8000_0001_EDX] =
934 .xlevel = 0x80000008,
935 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
940 .vendor = CPUID_VENDOR_INTEL,
944 .features[FEAT_1_EDX] =
951 .vendor = CPUID_VENDOR_INTEL,
955 .features[FEAT_1_EDX] =
962 .vendor = CPUID_VENDOR_INTEL,
966 .features[FEAT_1_EDX] =
973 .vendor = CPUID_VENDOR_INTEL,
977 .features[FEAT_1_EDX] =
984 .vendor = CPUID_VENDOR_AMD,
988 .features[FEAT_1_EDX] =
989 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
991 .features[FEAT_8000_0001_EDX] =
992 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
993 .xlevel = 0x80000008,
994 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
999 .vendor = CPUID_VENDOR_INTEL,
1003 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1004 .features[FEAT_1_EDX] =
1006 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1007 CPUID_ACPI | CPUID_SS,
1008 /* Some CPUs got no CPUID_SEP */
1009 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1011 .features[FEAT_1_ECX] =
1012 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1014 .features[FEAT_8000_0001_EDX] =
1016 .features[FEAT_8000_0001_ECX] =
1018 .xlevel = 0x80000008,
1019 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1024 .vendor = CPUID_VENDOR_INTEL,
1028 .features[FEAT_1_EDX] =
1029 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1030 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1031 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1032 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1033 CPUID_DE | CPUID_FP87,
1034 .features[FEAT_1_ECX] =
1035 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1036 .features[FEAT_8000_0001_EDX] =
1037 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1038 .features[FEAT_8000_0001_ECX] =
1040 .xlevel = 0x80000008,
1041 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1046 .vendor = CPUID_VENDOR_INTEL,
1050 .features[FEAT_1_EDX] =
1051 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1052 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1053 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1054 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1055 CPUID_DE | CPUID_FP87,
1056 .features[FEAT_1_ECX] =
1057 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1059 .features[FEAT_8000_0001_EDX] =
1060 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1061 .features[FEAT_8000_0001_ECX] =
1063 .xlevel = 0x80000008,
1064 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1069 .vendor = CPUID_VENDOR_INTEL,
1073 .features[FEAT_1_EDX] =
1074 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1075 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1076 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1077 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1078 CPUID_DE | CPUID_FP87,
1079 .features[FEAT_1_ECX] =
1080 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1081 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1082 .features[FEAT_8000_0001_EDX] =
1083 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1084 .features[FEAT_8000_0001_ECX] =
1086 .xlevel = 0x80000008,
1087 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1092 .vendor = CPUID_VENDOR_INTEL,
1096 .features[FEAT_1_EDX] =
1097 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1098 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1099 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1100 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1101 CPUID_DE | CPUID_FP87,
1102 .features[FEAT_1_ECX] =
1103 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1104 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1105 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1106 .features[FEAT_8000_0001_EDX] =
1107 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1108 .features[FEAT_8000_0001_ECX] =
1110 .features[FEAT_6_EAX] =
1112 .xlevel = 0x80000008,
1113 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1116 .name = "SandyBridge",
1118 .vendor = CPUID_VENDOR_INTEL,
1122 .features[FEAT_1_EDX] =
1123 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1124 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1125 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1126 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1127 CPUID_DE | CPUID_FP87,
1128 .features[FEAT_1_ECX] =
1129 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1130 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1131 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1132 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1134 .features[FEAT_8000_0001_EDX] =
1135 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1137 .features[FEAT_8000_0001_ECX] =
1139 .features[FEAT_XSAVE] =
1140 CPUID_XSAVE_XSAVEOPT,
1141 .features[FEAT_6_EAX] =
1143 .xlevel = 0x80000008,
1144 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1147 .name = "IvyBridge",
1149 .vendor = CPUID_VENDOR_INTEL,
1153 .features[FEAT_1_EDX] =
1154 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1155 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1156 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1157 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1158 CPUID_DE | CPUID_FP87,
1159 .features[FEAT_1_ECX] =
1160 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1161 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1162 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1163 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1164 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1165 .features[FEAT_7_0_EBX] =
1166 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1168 .features[FEAT_8000_0001_EDX] =
1169 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1171 .features[FEAT_8000_0001_ECX] =
1173 .features[FEAT_XSAVE] =
1174 CPUID_XSAVE_XSAVEOPT,
1175 .features[FEAT_6_EAX] =
1177 .xlevel = 0x80000008,
1178 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1181 .name = "Haswell-noTSX",
1183 .vendor = CPUID_VENDOR_INTEL,
1187 .features[FEAT_1_EDX] =
1188 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1189 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1190 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1191 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1192 CPUID_DE | CPUID_FP87,
1193 .features[FEAT_1_ECX] =
1194 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1195 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1196 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1197 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1198 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1199 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1200 .features[FEAT_8000_0001_EDX] =
1201 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1203 .features[FEAT_8000_0001_ECX] =
1204 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1205 .features[FEAT_7_0_EBX] =
1206 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1207 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1208 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1209 .features[FEAT_XSAVE] =
1210 CPUID_XSAVE_XSAVEOPT,
1211 .features[FEAT_6_EAX] =
1213 .xlevel = 0x80000008,
1214 .model_id = "Intel Core Processor (Haswell, no TSX)",
1218 .vendor = CPUID_VENDOR_INTEL,
1222 .features[FEAT_1_EDX] =
1223 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1224 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1225 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1226 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1227 CPUID_DE | CPUID_FP87,
1228 .features[FEAT_1_ECX] =
1229 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1230 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1231 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1232 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1233 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1234 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1235 .features[FEAT_8000_0001_EDX] =
1236 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1238 .features[FEAT_8000_0001_ECX] =
1239 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1240 .features[FEAT_7_0_EBX] =
1241 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1242 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1243 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1245 .features[FEAT_XSAVE] =
1246 CPUID_XSAVE_XSAVEOPT,
1247 .features[FEAT_6_EAX] =
1249 .xlevel = 0x80000008,
1250 .model_id = "Intel Core Processor (Haswell)",
1253 .name = "Broadwell-noTSX",
1255 .vendor = CPUID_VENDOR_INTEL,
1259 .features[FEAT_1_EDX] =
1260 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1261 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1262 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1263 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1264 CPUID_DE | CPUID_FP87,
1265 .features[FEAT_1_ECX] =
1266 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1267 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1268 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1269 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1270 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1271 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1272 .features[FEAT_8000_0001_EDX] =
1273 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1275 .features[FEAT_8000_0001_ECX] =
1276 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1277 .features[FEAT_7_0_EBX] =
1278 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1279 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1280 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1281 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1283 .features[FEAT_XSAVE] =
1284 CPUID_XSAVE_XSAVEOPT,
1285 .features[FEAT_6_EAX] =
1287 .xlevel = 0x80000008,
1288 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1291 .name = "Broadwell",
1293 .vendor = CPUID_VENDOR_INTEL,
1297 .features[FEAT_1_EDX] =
1298 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1299 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1300 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1301 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1302 CPUID_DE | CPUID_FP87,
1303 .features[FEAT_1_ECX] =
1304 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1305 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1306 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1307 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1308 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1309 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1310 .features[FEAT_8000_0001_EDX] =
1311 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1313 .features[FEAT_8000_0001_ECX] =
1314 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1315 .features[FEAT_7_0_EBX] =
1316 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1317 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1318 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1319 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1321 .features[FEAT_XSAVE] =
1322 CPUID_XSAVE_XSAVEOPT,
1323 .features[FEAT_6_EAX] =
1325 .xlevel = 0x80000008,
1326 .model_id = "Intel Core Processor (Broadwell)",
1329 .name = "Skylake-Client",
1331 .vendor = CPUID_VENDOR_INTEL,
1335 .features[FEAT_1_EDX] =
1336 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1337 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1338 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1339 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1340 CPUID_DE | CPUID_FP87,
1341 .features[FEAT_1_ECX] =
1342 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1343 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1344 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1345 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1346 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1347 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1348 .features[FEAT_8000_0001_EDX] =
1349 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1351 .features[FEAT_8000_0001_ECX] =
1352 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1353 .features[FEAT_7_0_EBX] =
1354 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1355 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1356 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1357 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1358 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1359 /* Missing: XSAVES (not supported by some Linux versions,
1360 * including v4.1 to v4.6).
1361 * KVM doesn't yet expose any XSAVES state save component,
1362 * and the only one defined in Skylake (processor tracing)
1363 * probably will block migration anyway.
1365 .features[FEAT_XSAVE] =
1366 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1367 CPUID_XSAVE_XGETBV1,
1368 .features[FEAT_6_EAX] =
1370 .xlevel = 0x80000008,
1371 .model_id = "Intel Core Processor (Skylake)",
1374 .name = "Opteron_G1",
1376 .vendor = CPUID_VENDOR_AMD,
1380 .features[FEAT_1_EDX] =
1381 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1382 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1383 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1384 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1385 CPUID_DE | CPUID_FP87,
1386 .features[FEAT_1_ECX] =
1388 .features[FEAT_8000_0001_EDX] =
1389 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1390 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1391 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1392 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1393 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1394 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1395 .xlevel = 0x80000008,
1396 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1399 .name = "Opteron_G2",
1401 .vendor = CPUID_VENDOR_AMD,
1405 .features[FEAT_1_EDX] =
1406 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1407 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1408 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1409 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1410 CPUID_DE | CPUID_FP87,
1411 .features[FEAT_1_ECX] =
1412 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1413 /* Missing: CPUID_EXT2_RDTSCP */
1414 .features[FEAT_8000_0001_EDX] =
1415 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1416 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1417 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1418 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1419 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1420 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1421 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1422 .features[FEAT_8000_0001_ECX] =
1423 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1424 .xlevel = 0x80000008,
1425 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1428 .name = "Opteron_G3",
1430 .vendor = CPUID_VENDOR_AMD,
1434 .features[FEAT_1_EDX] =
1435 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1436 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1437 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1438 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1439 CPUID_DE | CPUID_FP87,
1440 .features[FEAT_1_ECX] =
1441 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1443 /* Missing: CPUID_EXT2_RDTSCP */
1444 .features[FEAT_8000_0001_EDX] =
1445 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1446 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1447 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1448 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1449 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1450 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1451 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1452 .features[FEAT_8000_0001_ECX] =
1453 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1454 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1455 .xlevel = 0x80000008,
1456 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1459 .name = "Opteron_G4",
1461 .vendor = CPUID_VENDOR_AMD,
1465 .features[FEAT_1_EDX] =
1466 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1467 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1468 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1469 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1470 CPUID_DE | CPUID_FP87,
1471 .features[FEAT_1_ECX] =
1472 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1473 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1474 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1476 /* Missing: CPUID_EXT2_RDTSCP */
1477 .features[FEAT_8000_0001_EDX] =
1479 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1480 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1481 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1482 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1483 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1484 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1485 .features[FEAT_8000_0001_ECX] =
1486 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1487 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1488 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1491 .xlevel = 0x8000001A,
1492 .model_id = "AMD Opteron 62xx class CPU",
1495 .name = "Opteron_G5",
1497 .vendor = CPUID_VENDOR_AMD,
1501 .features[FEAT_1_EDX] =
1502 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1503 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1504 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1505 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1506 CPUID_DE | CPUID_FP87,
1507 .features[FEAT_1_ECX] =
1508 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1509 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1510 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1511 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1512 /* Missing: CPUID_EXT2_RDTSCP */
1513 .features[FEAT_8000_0001_EDX] =
1515 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1516 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1517 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1518 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1519 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1520 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1521 .features[FEAT_8000_0001_ECX] =
1522 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1523 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1524 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1527 .xlevel = 0x8000001A,
1528 .model_id = "AMD Opteron 63xx class CPU",
1532 typedef struct PropValue {
1533 const char *prop, *value;
1536 /* KVM-specific features that are automatically added/removed
1537 * from all CPU models when KVM is enabled.
1539 static PropValue kvm_default_props[] = {
1540 { "kvmclock", "on" },
1541 { "kvm-nopiodelay", "on" },
1542 { "kvm-asyncpf", "on" },
1543 { "kvm-steal-time", "on" },
1544 { "kvm-pv-eoi", "on" },
1545 { "kvmclock-stable-bit", "on" },
1548 { "monitor", "off" },
1553 /* TCG-specific defaults that override all CPU models when using TCG
1555 static PropValue tcg_default_props[] = {
1561 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1564 for (pv = kvm_default_props; pv->prop; pv++) {
1565 if (!strcmp(pv->prop, prop)) {
1571 /* It is valid to call this function only for properties that
1572 * are already present in the kvm_default_props table.
1577 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1578 bool migratable_only);
1582 static bool lmce_supported(void)
1586 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1590 return !!(mce_cap & MCG_LMCE_P);
1593 static int cpu_x86_fill_model_id(char *str)
1595 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1598 for (i = 0; i < 3; i++) {
1599 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1600 memcpy(str + i * 16 + 0, &eax, 4);
1601 memcpy(str + i * 16 + 4, &ebx, 4);
1602 memcpy(str + i * 16 + 8, &ecx, 4);
1603 memcpy(str + i * 16 + 12, &edx, 4);
1608 static X86CPUDefinition host_cpudef;
1610 static Property host_x86_cpu_properties[] = {
1611 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1612 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1613 DEFINE_PROP_END_OF_LIST()
1616 /* class_init for the "host" CPU model
1618 * This function may be called before KVM is initialized.
1620 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1622 DeviceClass *dc = DEVICE_CLASS(oc);
1623 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1624 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1626 xcc->kvm_required = true;
1628 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1629 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1631 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1632 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1633 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1634 host_cpudef.stepping = eax & 0x0F;
1636 cpu_x86_fill_model_id(host_cpudef.model_id);
1638 xcc->cpu_def = &host_cpudef;
1639 xcc->model_description =
1640 "KVM processor with all supported host features "
1641 "(only available in KVM mode)";
1643 /* level, xlevel, xlevel2, and the feature words are initialized on
1644 * instance_init, because they require KVM to be initialized.
1647 dc->props = host_x86_cpu_properties;
1648 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1649 dc->cannot_destroy_with_object_finalize_yet = true;
1652 static void host_x86_cpu_initfn(Object *obj)
1654 X86CPU *cpu = X86_CPU(obj);
1655 CPUX86State *env = &cpu->env;
1656 KVMState *s = kvm_state;
1658 /* We can't fill the features array here because we don't know yet if
1659 * "migratable" is true or false.
1661 cpu->host_features = true;
1663 /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
1664 if (kvm_enabled()) {
1665 env->cpuid_min_level =
1666 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1667 env->cpuid_min_xlevel =
1668 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1669 env->cpuid_min_xlevel2 =
1670 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1672 if (lmce_supported()) {
1673 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
1677 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1680 static const TypeInfo host_x86_cpu_type_info = {
1681 .name = X86_CPU_TYPE_NAME("host"),
1682 .parent = TYPE_X86_CPU,
1683 .instance_init = host_x86_cpu_initfn,
1684 .class_init = host_x86_cpu_class_init,
1689 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1691 FeatureWordInfo *f = &feature_word_info[w];
1694 for (i = 0; i < 32; ++i) {
1695 if ((1UL << i) & mask) {
1696 const char *reg = get_register_name_32(f->cpuid_reg);
1698 fprintf(stderr, "warning: %s doesn't support requested feature: "
1699 "CPUID.%02XH:%s%s%s [bit %d]\n",
1700 kvm_enabled() ? "host" : "TCG",
1702 f->feat_names[i] ? "." : "",
1703 f->feat_names[i] ? f->feat_names[i] : "", i);
1708 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1709 const char *name, void *opaque,
1712 X86CPU *cpu = X86_CPU(obj);
1713 CPUX86State *env = &cpu->env;
1716 value = (env->cpuid_version >> 8) & 0xf;
1718 value += (env->cpuid_version >> 20) & 0xff;
1720 visit_type_int(v, name, &value, errp);
1723 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1724 const char *name, void *opaque,
1727 X86CPU *cpu = X86_CPU(obj);
1728 CPUX86State *env = &cpu->env;
1729 const int64_t min = 0;
1730 const int64_t max = 0xff + 0xf;
1731 Error *local_err = NULL;
1734 visit_type_int(v, name, &value, &local_err);
1736 error_propagate(errp, local_err);
1739 if (value < min || value > max) {
1740 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1741 name ? name : "null", value, min, max);
1745 env->cpuid_version &= ~0xff00f00;
1747 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1749 env->cpuid_version |= value << 8;
1753 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1754 const char *name, void *opaque,
1757 X86CPU *cpu = X86_CPU(obj);
1758 CPUX86State *env = &cpu->env;
1761 value = (env->cpuid_version >> 4) & 0xf;
1762 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1763 visit_type_int(v, name, &value, errp);
1766 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1767 const char *name, void *opaque,
1770 X86CPU *cpu = X86_CPU(obj);
1771 CPUX86State *env = &cpu->env;
1772 const int64_t min = 0;
1773 const int64_t max = 0xff;
1774 Error *local_err = NULL;
1777 visit_type_int(v, name, &value, &local_err);
1779 error_propagate(errp, local_err);
1782 if (value < min || value > max) {
1783 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1784 name ? name : "null", value, min, max);
1788 env->cpuid_version &= ~0xf00f0;
1789 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1792 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1793 const char *name, void *opaque,
1796 X86CPU *cpu = X86_CPU(obj);
1797 CPUX86State *env = &cpu->env;
1800 value = env->cpuid_version & 0xf;
1801 visit_type_int(v, name, &value, errp);
1804 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1805 const char *name, void *opaque,
1808 X86CPU *cpu = X86_CPU(obj);
1809 CPUX86State *env = &cpu->env;
1810 const int64_t min = 0;
1811 const int64_t max = 0xf;
1812 Error *local_err = NULL;
1815 visit_type_int(v, name, &value, &local_err);
1817 error_propagate(errp, local_err);
1820 if (value < min || value > max) {
1821 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1822 name ? name : "null", value, min, max);
1826 env->cpuid_version &= ~0xf;
1827 env->cpuid_version |= value & 0xf;
1830 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1832 X86CPU *cpu = X86_CPU(obj);
1833 CPUX86State *env = &cpu->env;
1836 value = g_malloc(CPUID_VENDOR_SZ + 1);
1837 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1838 env->cpuid_vendor3);
1842 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1845 X86CPU *cpu = X86_CPU(obj);
1846 CPUX86State *env = &cpu->env;
1849 if (strlen(value) != CPUID_VENDOR_SZ) {
1850 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1854 env->cpuid_vendor1 = 0;
1855 env->cpuid_vendor2 = 0;
1856 env->cpuid_vendor3 = 0;
1857 for (i = 0; i < 4; i++) {
1858 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1859 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1860 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1864 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1866 X86CPU *cpu = X86_CPU(obj);
1867 CPUX86State *env = &cpu->env;
1871 value = g_malloc(48 + 1);
1872 for (i = 0; i < 48; i++) {
1873 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1879 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1882 X86CPU *cpu = X86_CPU(obj);
1883 CPUX86State *env = &cpu->env;
1886 if (model_id == NULL) {
1889 len = strlen(model_id);
1890 memset(env->cpuid_model, 0, 48);
1891 for (i = 0; i < 48; i++) {
1895 c = (uint8_t)model_id[i];
1897 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1901 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1902 void *opaque, Error **errp)
1904 X86CPU *cpu = X86_CPU(obj);
1907 value = cpu->env.tsc_khz * 1000;
1908 visit_type_int(v, name, &value, errp);
1911 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1912 void *opaque, Error **errp)
1914 X86CPU *cpu = X86_CPU(obj);
1915 const int64_t min = 0;
1916 const int64_t max = INT64_MAX;
1917 Error *local_err = NULL;
1920 visit_type_int(v, name, &value, &local_err);
1922 error_propagate(errp, local_err);
1925 if (value < min || value > max) {
1926 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1927 name ? name : "null", value, min, max);
1931 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1934 /* Generic getter for "feature-words" and "filtered-features" properties */
1935 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1936 const char *name, void *opaque,
1939 uint32_t *array = (uint32_t *)opaque;
1941 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1942 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1943 X86CPUFeatureWordInfoList *list = NULL;
1945 for (w = 0; w < FEATURE_WORDS; w++) {
1946 FeatureWordInfo *wi = &feature_word_info[w];
1947 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1948 qwi->cpuid_input_eax = wi->cpuid_eax;
1949 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1950 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1951 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1952 qwi->features = array[w];
1954 /* List will be in reverse order, but order shouldn't matter */
1955 list_entries[w].next = list;
1956 list_entries[w].value = &word_infos[w];
1957 list = &list_entries[w];
1960 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
1963 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1964 void *opaque, Error **errp)
1966 X86CPU *cpu = X86_CPU(obj);
1967 int64_t value = cpu->hyperv_spinlock_attempts;
1969 visit_type_int(v, name, &value, errp);
1972 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1973 void *opaque, Error **errp)
1975 const int64_t min = 0xFFF;
1976 const int64_t max = UINT_MAX;
1977 X86CPU *cpu = X86_CPU(obj);
1981 visit_type_int(v, name, &value, &err);
1983 error_propagate(errp, err);
1987 if (value < min || value > max) {
1988 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1989 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1990 object_get_typename(obj), name ? name : "null",
1994 cpu->hyperv_spinlock_attempts = value;
1997 static PropertyInfo qdev_prop_spinlocks = {
1999 .get = x86_get_hv_spinlocks,
2000 .set = x86_set_hv_spinlocks,
2003 /* Convert all '_' in a feature string option name to '-', to make feature
2004 * name conform to QOM property naming rule, which uses '-' instead of '_'.
2006 static inline void feat2prop(char *s)
2008 while ((s = strchr(s, '_'))) {
2013 /* Compatibility hack to maintain legacy +-feat semantic,
2014  * where +-feat overwrites any feature set by
2015  * feat=on|feat even if the latter is parsed after +-feat
2016  * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
2018 static FeatureWordArray plus_features = { 0 };
2019 static FeatureWordArray minus_features = { 0 };
2021 /* Parse "+feature,-feature,feature=foo" CPU feature string
2023 static void x86_cpu_parse_featurestr(const char *typename, char *features,
2026 char *featurestr; /* Single 'key=value" string being parsed */
2027 Error *local_err = NULL;
2028 static bool cpu_globals_initialized;
2030 if (cpu_globals_initialized) {
2033 cpu_globals_initialized = true;
2039 for (featurestr = strtok(features, ",");
2040 featurestr && !local_err;
2041 featurestr = strtok(NULL, ",")) {
2043 const char *val = NULL;
2046 GlobalProperty *prop;
2048 /* Compatibility syntax: */
2049 if (featurestr[0] == '+') {
2050 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
2052 } else if (featurestr[0] == '-') {
2053 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
2057 eq = strchr(featurestr, '=');
2065 feat2prop(featurestr);
2069 if (!strcmp(name, "tsc-freq")) {
2073 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
2074 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
2075 if (tsc_freq < 0 || *err) {
2076 error_setg(errp, "bad numerical value %s", val);
2079 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2081 name = "tsc-frequency";
2084 prop = g_new0(typeof(*prop), 1);
2085 prop->driver = typename;
2086 prop->property = g_strdup(name);
2087 prop->value = g_strdup(val);
2088 prop->errp = &error_fatal;
2089 qdev_prop_register_global(prop);
2093 error_propagate(errp, local_err);
2097 /* Print all cpuid feature names in featureset
2099 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2104 for (bit = 0; bit < 32; bit++) {
2105 if (featureset[bit]) {
2106 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2112 /* Sort alphabetically by type name, listing kvm_required models last. */
2113 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2115 ObjectClass *class_a = (ObjectClass *)a;
2116 ObjectClass *class_b = (ObjectClass *)b;
2117 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2118 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2119 const char *name_a, *name_b;
2121 if (cc_a->kvm_required != cc_b->kvm_required) {
2122 /* kvm_required items go last */
2123 return cc_a->kvm_required ? 1 : -1;
2125 name_a = object_class_get_name(class_a);
2126 name_b = object_class_get_name(class_b);
2127 return strcmp(name_a, name_b);
2131 static GSList *get_sorted_cpu_model_list(void)
2133 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2134 list = g_slist_sort(list, x86_cpu_list_compare);
2138 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2140 ObjectClass *oc = data;
2141 X86CPUClass *cc = X86_CPU_CLASS(oc);
2142 CPUListState *s = user_data;
2143 char *name = x86_cpu_class_get_model_name(cc);
2144 const char *desc = cc->model_description;
2146 desc = cc->cpu_def->model_id;
2149 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2154 /* list available CPU models and flags */
2155 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2160 .cpu_fprintf = cpu_fprintf,
2164 (*cpu_fprintf)(f, "Available CPUs:\n");
2165 list = get_sorted_cpu_model_list();
2166 g_slist_foreach(list, x86_cpu_list_entry, &s);
2169 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2170 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2171 FeatureWordInfo *fw = &feature_word_info[i];
2173 (*cpu_fprintf)(f, " ");
2174 listflags(f, cpu_fprintf, fw->feat_names);
2175 (*cpu_fprintf)(f, "\n");
2179 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2181 ObjectClass *oc = data;
2182 X86CPUClass *cc = X86_CPU_CLASS(oc);
2183 CpuDefinitionInfoList **cpu_list = user_data;
2184 CpuDefinitionInfoList *entry;
2185 CpuDefinitionInfo *info;
2187 info = g_malloc0(sizeof(*info));
2188 info->name = x86_cpu_class_get_model_name(cc);
2190 entry = g_malloc0(sizeof(*entry));
2191 entry->value = info;
2192 entry->next = *cpu_list;
2196 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2198 CpuDefinitionInfoList *cpu_list = NULL;
2199 GSList *list = get_sorted_cpu_model_list();
2200 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2205 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2206 bool migratable_only)
2208 FeatureWordInfo *wi = &feature_word_info[w];
2211 if (kvm_enabled()) {
2212 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2215 } else if (tcg_enabled()) {
2216 r = wi->tcg_features;
2220 if (migratable_only) {
2221 r &= x86_cpu_get_migratable_flags(w);
2227 * Filters CPU feature words based on host availability of each feature.
2229 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2231 static int x86_cpu_filter_features(X86CPU *cpu)
2233 CPUX86State *env = &cpu->env;
2237 for (w = 0; w < FEATURE_WORDS; w++) {
2238 uint32_t host_feat =
2239 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2240 uint32_t requested_features = env->features[w];
2241 env->features[w] &= host_feat;
2242 cpu->filtered_features[w] = requested_features & ~env->features[w];
2243 if (cpu->filtered_features[w]) {
2244 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2245 report_unavailable_features(w, cpu->filtered_features[w]);
2254 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2257 for (pv = props; pv->prop; pv++) {
2261 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2266 /* Load data from X86CPUDefinition
2268 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2270 CPUX86State *env = &cpu->env;
2272 char host_vendor[CPUID_VENDOR_SZ + 1];
2275 /* CPU models only set _minimum_ values for level/xlevel: */
2276 object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
2277 object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2279 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2280 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2281 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2282 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2283 for (w = 0; w < FEATURE_WORDS; w++) {
2284 env->features[w] = def->features[w];
2287 /* Special cases not set in the X86CPUDefinition structs: */
2288 if (kvm_enabled()) {
2289 if (!kvm_irqchip_in_kernel()) {
2290 x86_cpu_change_kvm_default("x2apic", "off");
2293 x86_cpu_apply_props(cpu, kvm_default_props);
2294 } else if (tcg_enabled()) {
2295 x86_cpu_apply_props(cpu, tcg_default_props);
2298 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2300 /* sysenter isn't supported in compatibility mode on AMD,
2301 * syscall isn't supported in compatibility mode on Intel.
2302 * Normally we advertise the actual CPU vendor, but you can
2303 * override this using the 'vendor' property if you want to use
2304 * KVM's sysenter/syscall emulation in compatibility mode and
2305 * when doing cross vendor migration
2307 vendor = def->vendor;
2308 if (kvm_enabled()) {
2309 uint32_t ebx = 0, ecx = 0, edx = 0;
2310 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2311 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2312 vendor = host_vendor;
2315 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2319 X86CPU *cpu_x86_init(const char *cpu_model)
2321 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
2324 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2326 X86CPUDefinition *cpudef = data;
2327 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2329 xcc->cpu_def = cpudef;
2332 static void x86_register_cpudef_type(X86CPUDefinition *def)
2334 char *typename = x86_cpu_type_name(def->name);
2337 .parent = TYPE_X86_CPU,
2338 .class_init = x86_cpu_cpudef_class_init,
2346 #if !defined(CONFIG_USER_ONLY)
2348 void cpu_clear_apic_feature(CPUX86State *env)
2350 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2353 #endif /* !CONFIG_USER_ONLY */
/* Emulate the CPUID instruction for the given leaf (index) and
 * sub-leaf (count), writing the four result registers through
 * eax/ebx/ecx/edx.  Out-of-range leaves are clamped per vendor
 * conventions (Intel: fold back to cpuid_level; Centaur: 0xC000xxxx
 * range handled via cpuid_xlevel2).
 * NOTE(review): this extract is line-sampled — the switch statement's
 * `case 0xXXXX:` labels and several closing braces are missing, so the
 * leaf boundaries below are inferred from the surviving comments. */
2355 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2356 uint32_t *eax, uint32_t *ebx,
2357 uint32_t *ecx, uint32_t *edx)
2359 X86CPU *cpu = x86_env_get_cpu(env);
2360 CPUState *cs = CPU(cpu);
2361 uint32_t pkg_offset;
2363 /* test if maximum index reached */
2364 if (index & 0x80000000) {
2365 if (index > env->cpuid_xlevel) {
2366 if (env->cpuid_xlevel2 > 0) {
2367 /* Handle the Centaur's CPUID instruction. */
2368 if (index > env->cpuid_xlevel2) {
2369 index = env->cpuid_xlevel2;
2370 } else if (index < 0xC0000000) {
2371 index = env->cpuid_xlevel;
2374 /* Intel documentation states that invalid EAX input will
2375 * return the same information as EAX=cpuid_level
2376 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2378 index = env->cpuid_level;
2382 if (index > env->cpuid_level)
2383 index = env->cpuid_level;
/* Leaf 0: vendor string (EBX/EDX/ECX order) and max basic leaf. */
2388 *eax = env->cpuid_level;
2389 *ebx = env->cpuid_vendor1;
2390 *edx = env->cpuid_vendor2;
2391 *ecx = env->cpuid_vendor3;
/* Leaf 1: version/APIC id/feature flags. */
2394 *eax = env->cpuid_version;
2395 *ebx = (cpu->apic_id << 24) |
2396 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2397 *ecx = env->features[FEAT_1_ECX];
2398 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2399 *ecx |= CPUID_EXT_OSXSAVE;
2401 *edx = env->features[FEAT_1_EDX];
2402 if (cs->nr_cores * cs->nr_threads > 1) {
2403 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2408 /* cache info: needed for Pentium Pro compatibility */
2409 if (cpu->cache_info_passthrough) {
2410 host_cpuid(index, 0, eax, ebx, ecx, edx);
2413 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2415 if (!cpu->enable_l3_cache) {
2418 *ecx = L3_N_DESCRIPTOR;
2420 *edx = (L1D_DESCRIPTOR << 16) | \
2421 (L1I_DESCRIPTOR << 8) | \
2425 /* cache info: needed for Core compatibility */
2426 if (cpu->cache_info_passthrough) {
2427 host_cpuid(index, count, eax, ebx, ecx, edx);
2428 *eax &= ~0xFC000000;
2432 case 0: /* L1 dcache info */
2433 *eax |= CPUID_4_TYPE_DCACHE | \
2434 CPUID_4_LEVEL(1) | \
2435 CPUID_4_SELF_INIT_LEVEL;
2436 *ebx = (L1D_LINE_SIZE - 1) | \
2437 ((L1D_PARTITIONS - 1) << 12) | \
2438 ((L1D_ASSOCIATIVITY - 1) << 22);
2439 *ecx = L1D_SETS - 1;
2440 *edx = CPUID_4_NO_INVD_SHARING;
2442 case 1: /* L1 icache info */
2443 *eax |= CPUID_4_TYPE_ICACHE | \
2444 CPUID_4_LEVEL(1) | \
2445 CPUID_4_SELF_INIT_LEVEL;
2446 *ebx = (L1I_LINE_SIZE - 1) | \
2447 ((L1I_PARTITIONS - 1) << 12) | \
2448 ((L1I_ASSOCIATIVITY - 1) << 22);
2449 *ecx = L1I_SETS - 1;
2450 *edx = CPUID_4_NO_INVD_SHARING;
2452 case 2: /* L2 cache info */
2453 *eax |= CPUID_4_TYPE_UNIFIED | \
2454 CPUID_4_LEVEL(2) | \
2455 CPUID_4_SELF_INIT_LEVEL;
2456 if (cs->nr_threads > 1) {
2457 *eax |= (cs->nr_threads - 1) << 14;
2459 *ebx = (L2_LINE_SIZE - 1) | \
2460 ((L2_PARTITIONS - 1) << 12) | \
2461 ((L2_ASSOCIATIVITY - 1) << 22);
2463 *edx = CPUID_4_NO_INVD_SHARING;
2465 case 3: /* L3 cache info */
2466 if (!cpu->enable_l3_cache) {
2473 *eax |= CPUID_4_TYPE_UNIFIED | \
2474 CPUID_4_LEVEL(3) | \
2475 CPUID_4_SELF_INIT_LEVEL;
2476 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
/* Bits 25:14: max logical processors sharing this cache, derived
 * from the APIC-ID package offset. */
2477 *eax |= ((1 << pkg_offset) - 1) << 14;
2478 *ebx = (L3_N_LINE_SIZE - 1) | \
2479 ((L3_N_PARTITIONS - 1) << 12) | \
2480 ((L3_N_ASSOCIATIVITY - 1) << 22);
2481 *ecx = L3_N_SETS - 1;
2482 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
2484 default: /* end of info */
2493 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2494 if ((*eax & 31) && cs->nr_cores > 1) {
2495 *eax |= (cs->nr_cores - 1) << 26;
2499 /* mwait info: needed for Core compatibility */
2500 *eax = 0; /* Smallest monitor-line size in bytes */
2501 *ebx = 0; /* Largest monitor-line size in bytes */
2502 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2506 /* Thermal and Power Leaf */
2507 *eax = env->features[FEAT_6_EAX];
2513 /* Structured Extended Feature Flags Enumeration Leaf */
2515 *eax = 0; /* Maximum ECX value for sub-leaves */
2516 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2517 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
/* OSPKE mirrors CR4.PKE, analogous to OSXSAVE in leaf 1. */
2518 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2519 *ecx |= CPUID_7_0_ECX_OSPKE;
2521 *edx = 0; /* Reserved */
2530 /* Direct Cache Access Information Leaf */
2531 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2537 /* Architectural Performance Monitoring Leaf */
2538 if (kvm_enabled() && cpu->enable_pmu) {
2539 KVMState *s = cs->kvm_state;
2541 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2542 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2543 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2544 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2553 /* Extended Topology Enumeration Leaf */
2554 if (!cpu->enable_cpuid_0xb) {
2555 *eax = *ebx = *ecx = *edx = 0;
2559 *ecx = count & 0xff;
2560 *edx = cpu->apic_id;
/* Sub-leaf 0: SMT level; sub-leaf 1: core level; higher: invalid.
 * NOTE(review): the inner switch's case labels are missing here. */
2564 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
2565 *ebx = cs->nr_threads;
2566 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2569 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2570 *ebx = cs->nr_cores * cs->nr_threads;
2571 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2576 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2579 assert(!(*eax & ~0x1f));
2580 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
2583 /* Processor Extended State */
2588 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2593 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
2594 *eax = env->features[FEAT_XSAVE_COMP_LO];
2595 *edx = env->features[FEAT_XSAVE_COMP_HI];
2597 } else if (count == 1) {
2598 *eax = env->features[FEAT_XSAVE];
2599 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2600 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
2601 const ExtSaveArea *esa = &x86_ext_save_areas[count];
/* Leaf 0x80000000: max extended leaf + vendor string. */
2609 *eax = env->cpuid_xlevel;
2610 *ebx = env->cpuid_vendor1;
2611 *edx = env->cpuid_vendor2;
2612 *ecx = env->cpuid_vendor3;
/* Leaf 0x80000001: extended version/feature flags. */
2615 *eax = env->cpuid_version;
2617 *ecx = env->features[FEAT_8000_0001_ECX];
2618 *edx = env->features[FEAT_8000_0001_EDX];
2620 /* The Linux kernel checks for the CMPLegacy bit and
2621 * discards multiple thread information if it is set.
2622 * So don't set it here for Intel to make Linux guests happy.
2624 if (cs->nr_cores * cs->nr_threads > 1) {
2625 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2626 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2627 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2628 *ecx |= 1 << 1; /* CmpLegacy bit */
/* Leaves 0x80000002..4: processor brand string, 16 bytes per leaf. */
2635 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2636 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2637 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2638 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2641 /* cache info (L1 cache) */
2642 if (cpu->cache_info_passthrough) {
2643 host_cpuid(index, 0, eax, ebx, ecx, edx);
2646 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2647 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2648 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2649 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2650 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2651 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2652 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2653 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2656 /* cache info (L2 cache) */
2657 if (cpu->cache_info_passthrough) {
2658 host_cpuid(index, 0, eax, ebx, ecx, edx);
2661 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2662 (L2_DTLB_2M_ENTRIES << 16) | \
2663 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2664 (L2_ITLB_2M_ENTRIES);
2665 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2666 (L2_DTLB_4K_ENTRIES << 16) | \
2667 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2668 (L2_ITLB_4K_ENTRIES);
2669 *ecx = (L2_SIZE_KB_AMD << 16) | \
2670 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2671 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2672 if (!cpu->enable_l3_cache) {
2673 *edx = ((L3_SIZE_KB / 512) << 18) | \
2674 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2675 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2677 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
2678 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
2679 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
/* Leaf 0x80000007: advanced power management. */
2686 *edx = env->features[FEAT_8000_0007_EDX];
2689 /* virtual & phys address size in low 2 bytes. */
2690 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2691 /* 64 bit processor, 48 bits virtual, configurable
2694 *eax = 0x00003000 + cpu->phys_bits;
2696 *eax = cpu->phys_bits;
2701 if (cs->nr_cores * cs->nr_threads > 1) {
2702 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
/* Leaf 0x8000000A: SVM revision/feature info, only when SVM is on. */
2706 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2707 *eax = 0x00000001; /* SVM Revision */
2708 *ebx = 0x00000010; /* nr of ASIDs */
2710 *edx = env->features[FEAT_SVM]; /* optional features */
/* Leaf 0xC0000000: Centaur max leaf. */
2719 *eax = env->cpuid_xlevel2;
2725 /* Support for VIA CPU's CPUID instruction */
2726 *eax = env->cpuid_version;
2729 *edx = env->features[FEAT_C000_0001_EDX];
2734 /* Reserved for the future, and now filled with zero */
2741 /* reserved values: zero */
2750 /* CPUClass::reset() */
/* Bring the vCPU to the architectural power-on/RESET state:
 * real mode, CS at 0xF000:FFF0 semantics via the segment caches,
 * FPU/MXCSR defaults, MTRRs cleared, BSP designation, and (under KVM)
 * a kernel-side vCPU reset.
 * NOTE(review): this extract is line-sampled; some locals (i, cr4,
 * xcr0) are declared on lines missing from this view. */
2751 static void x86_cpu_reset(CPUState *s)
2753 X86CPU *cpu = X86_CPU(s);
2754 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2755 CPUX86State *env = &cpu->env;
2760 xcc->parent_reset(s);
/* Zero everything up to end_reset_fields; fields after that marker
 * survive reset intentionally. */
2762 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
2766 env->old_exception = -1;
2768 /* init to reset state */
2770 env->hflags2 |= HF2_GIF_MASK;
/* CR0 reset value: ET set plus CD/NW (0x60000010). */
2772 cpu_x86_update_cr0(env, 0x60000010);
2773 env->a20_mask = ~0x0;
2774 env->smbase = 0x30000;
2776 env->idt.limit = 0xffff;
2777 env->gdt.limit = 0xffff;
2778 env->ldt.limit = 0xffff;
2779 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2780 env->tr.limit = 0xffff;
2781 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
/* CS base 0xFFFF0000 + selector 0xF000 gives the architectural
 * reset vector 0xFFFFFFF0. */
2783 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2784 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2785 DESC_R_MASK | DESC_A_MASK);
2786 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2787 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2789 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2790 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2792 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2793 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2795 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2796 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2798 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2799 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
/* EDX holds the CPU signature after reset, as real hardware does. */
2803 env->regs[R_EDX] = env->cpuid_version;
2808 for (i = 0; i < 8; i++) {
2811 cpu_set_fpuc(env, 0x37f);
2813 env->mxcsr = 0x1f80;
2814 /* All units are in INIT state. */
2817 env->pat = 0x0007040600070406ULL;
2818 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2820 memset(env->dr, 0, sizeof(env->dr));
2821 env->dr[6] = DR6_FIXED_1;
2822 env->dr[7] = DR7_FIXED_1;
2823 cpu_breakpoint_remove_all(s, BP_CPU);
2824 cpu_watchpoint_remove_all(s, BP_CPU);
2827 xcr0 = XSTATE_FP_MASK;
2829 #ifdef CONFIG_USER_ONLY
2830 /* Enable all the features for user-mode. */
2831 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2832 xcr0 |= XSTATE_SSE_MASK;
2834 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2835 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2836 if (env->features[esa->feature] & esa->bits) {
2841 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2842 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2844 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2845 cr4 |= CR4_FSGSBASE_MASK;
2850 cpu_x86_update_cr4(env, cr4);
2853 * SDM 11.11.5 requires:
2854 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2855 * - IA32_MTRR_PHYSMASKn.V = 0
2856 * All other bits are undefined. For simplification, zero it all.
2858 env->mtrr_deftype = 0;
2859 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2860 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2862 #if !defined(CONFIG_USER_ONLY)
2863 /* We hard-wire the BSP to the first CPU. */
2864 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2866 s->halted = !cpu_is_bsp(cpu);
2868 if (kvm_enabled()) {
2869 kvm_arch_reset_vcpu(cpu);
2874 #ifndef CONFIG_USER_ONLY
/* True if this CPU is the bootstrap processor, as reported by the
 * BSP bit of its IA32_APICBASE MSR. */
2875 bool cpu_is_bsp(X86CPU *cpu)
2877 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2880 /* TODO: remove me, when reset over QOM tree is implemented */
/* qemu_register_reset() callback: funnel machine reset into cpu_reset()
 * for this CPU (opaque is the X86CPU pointer). */
2881 static void x86_cpu_machine_reset_cb(void *opaque)
2883 X86CPU *cpu = opaque;
2884 cpu_reset(CPU(cpu));
/* Initialize Machine Check Exception state (MCG_CAP/MCG_CTL and the
 * per-bank control MSRs) for family >= 6 CPUs that advertise both
 * MCE and MCA; optionally advertises LMCE when enabled. */
2888 static void mce_init(X86CPU *cpu)
2890 CPUX86State *cenv = &cpu->env;
/* Family field is CPUID version bits 11:8. */
2893 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2894 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2895 (CPUID_MCE | CPUID_MCA)) {
2896 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
2897 (cpu->enable_lmce ? MCG_LMCE_P : 0);
2898 cenv->mcg_ctl = ~(uint64_t)0;
2899 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2900 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2905 #ifndef CONFIG_USER_ONLY
/* Instantiate the local APIC child object for this CPU, selecting the
 * backend by accelerator: in-kernel KVM APIC, Xen APIC, or the TCG
 * "apic" device.  The device is created but not yet realized; see
 * x86_cpu_apic_realize(). */
2906 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2908 APICCommonState *apic;
2909 const char *apic_type = "apic";
2911 if (kvm_apic_in_kernel()) {
2912 apic_type = "kvm-apic";
2913 } else if (xen_enabled()) {
2914 apic_type = "xen-apic";
2917 cpu->apic_state = DEVICE(object_new(apic_type));
2919 object_property_add_child(OBJECT(cpu), "lapic",
2920 OBJECT(cpu->apic_state), &error_abort);
/* Drop our creation ref; the child property now owns the object. */
2921 object_unref(OBJECT(cpu->apic_state));
2923 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2924 /* TODO: convert to link<> */
2925 apic = APIC_COMMON(cpu->apic_state);
2927 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
/* Realize the APIC created by x86_cpu_apic_create() and, once per
 * machine, map the APIC MMIO region at the default base address.
 * No-op when the CPU has no APIC. */
2930 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2932 APICCommonState *apic;
/* Shared across all CPUs: only the first realized APIC maps MMIO. */
2933 static bool apic_mmio_map_once;
2935 if (cpu->apic_state == NULL) {
2938 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2941 /* Map APIC MMIO area */
2942 apic = APIC_COMMON(cpu->apic_state);
2943 if (!apic_mmio_map_once) {
2944 memory_region_add_subregion_overlap(get_system_memory(),
2946 MSR_IA32_APICBASE_BASE,
2949 apic_mmio_map_once = true;
/* machine_done notifier: if the board created /machine/smram, alias it
 * into this CPU's address space (cpu_as_root) at priority 1 so SMRAM
 * can overlay normal memory; starts disabled. */
2953 static void x86_cpu_machine_done(Notifier *n, void *unused)
2955 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2956 MemoryRegion *smram =
2957 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2960 cpu->smram = g_new(MemoryRegion, 1);
2961 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2962 smram, 0, 1ull << 32);
2963 memory_region_set_enabled(cpu->smram, false);
2964 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2968 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2973 /* Note: Only safe for use on x86(-64) hosts */
/* Query the host's physical address width via CPUID leaf 0x80000008
 * (EAX bits 7:0); falls back to 36 bits when the leaf is absent. */
2974 static uint32_t x86_host_phys_bits(void)
2977 uint32_t host_phys_bits;
2979 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
2980 if (eax >= 0x80000008) {
2981 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
2982 /* Note: According to AMD doc 25481 rev 2.34 they have a field
2983 * at 23:16 that can specify a maximum physical address bits for
2984 * the guest that can override this value; but I've not seen
2985 * anything with that set.
2987 host_phys_bits = eax & 0xff;
2989 /* It's an odd 64 bit machine that doesn't have the leaf for
2990 * physical address bits; fall back to 36 that's most older
2993 host_phys_bits = 36;
2996 return host_phys_bits;
2999 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3006 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
/* If feature word w is non-empty, bump the matching minimum CPUID level
 * (basic / 0x8000xxxx / 0xC000xxxx, chosen by the leaf's top nibble)
 * so the leaf that reports the word is actually reachable. */
3007 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3009 CPUX86State *env = &cpu->env;
3010 FeatureWordInfo *fi = &feature_word_info[w];
3011 uint32_t eax = fi->cpuid_eax;
3012 uint32_t region = eax & 0xF0000000;
3014 if (!env->features[w]) {
3020 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3023 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3026 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3031 /* Calculate XSAVE components based on the configured CPU feature flags */
/* Build the XSAVE component bitmap: x87+SSE always, plus one bit per
 * extended save area whose gating feature is enabled; split across
 * the FEAT_XSAVE_COMP_LO/HI feature words.  No-op without XSAVE. */
3032 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3034 CPUX86State *env = &cpu->env;
3038 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3042 mask = (XSTATE_FP_MASK | XSTATE_SSE_MASK);
3043 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3044 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3045 if (env->features[esa->feature] & esa->bits) {
3046 mask |= (1ULL << i);
3050 env->features[FEAT_XSAVE_COMP_LO] = mask;
3051 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
/* Vendor-check helpers: compare all three CPUID vendor-string words
 * against the GenuineIntel / AuthenticAMD constants. */
3054 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
3055 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
3056 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
3057 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
3058 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
3059 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for X86CPU: finalize feature words, auto-adjust
 * CPUID levels, filter unsupported features, pick phys_bits, create the
 * APIC and per-CPU address spaces, then chain to the parent realize.
 * Errors are reported through errp / local_err propagation.
 * NOTE(review): this extract is line-sampled; some locals, braces and
 * goto-error paths are on lines missing from this view. */
3060 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
3062 CPUState *cs = CPU(dev);
3063 X86CPU *cpu = X86_CPU(dev);
3064 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3065 CPUX86State *env = &cpu->env;
3066 Error *local_err = NULL;
3067 static bool ht_warned;
3070 if (xcc->kvm_required && !kvm_enabled()) {
3071 char *name = x86_cpu_class_get_model_name(xcc);
3072 error_setg(&local_err, "CPU model '%s' requires KVM", name);
3077 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
3078 error_setg(errp, "apic-id property was not initialized properly");
3082 /*TODO: cpu->host_features incorrectly overwrites features
3083 * set using "feat=on|off". Once we fix this, we can convert
3084 * plus_features & minus_features to global properties
3085 * inside x86_cpu_parse_featurestr() too.
3087 if (cpu->host_features) {
3088 for (w = 0; w < FEATURE_WORDS; w++) {
3090 x86_cpu_get_supported_feature_word(w, cpu->migratable);
/* Apply explicit +feat/-feat overrides after the base model. */
3094 for (w = 0; w < FEATURE_WORDS; w++) {
3095 cpu->env.features[w] |= plus_features[w];
3096 cpu->env.features[w] &= ~minus_features[w];
3099 if (!kvm_enabled() || !cpu->expose_kvm) {
3100 env->features[FEAT_KVM] = 0;
3103 x86_cpu_enable_xsave_components(cpu);
3105 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
3106 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
3107 if (cpu->full_cpuid_auto_level) {
3108 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
3109 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
3110 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
3111 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
3112 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
3113 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
3114 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
3115 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
3116 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
3117 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
3118 /* SVM requires CPUID[0x8000000A] */
3119 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3120 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
3124 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
3125 if (env->cpuid_level == UINT32_MAX) {
3126 env->cpuid_level = env->cpuid_min_level;
3128 if (env->cpuid_xlevel == UINT32_MAX) {
3129 env->cpuid_xlevel = env->cpuid_min_xlevel;
3131 if (env->cpuid_xlevel2 == UINT32_MAX) {
3132 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
/* With "enforce", unsupported requested features are a hard error. */
3135 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
3136 error_setg(&local_err,
3138 "Host doesn't support requested features" :
3139 "TCG doesn't support requested features");
3143 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
3146 if (IS_AMD_CPU(env)) {
3147 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
3148 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
3149 & CPUID_EXT2_AMD_ALIASES);
3152 /* For 64bit systems think about the number of physical bits to present.
3153 * ideally this should be the same as the host; anything other than matching
3154 * the host can cause incorrect guest behaviour.
3155 * QEMU used to pick the magic value of 40 bits that corresponds to
3156 * consumer AMD devices but nothing else.
3158 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3159 if (kvm_enabled()) {
3160 uint32_t host_phys_bits = x86_host_phys_bits();
3163 if (cpu->host_phys_bits) {
3164 /* The user asked for us to use the host physical bits */
3165 cpu->phys_bits = host_phys_bits;
3168 /* Print a warning if the user set it to a value that's not the
3171 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
3173 error_report("Warning: Host physical bits (%u)"
3174 " does not match phys-bits property (%u)",
3175 host_phys_bits, cpu->phys_bits);
3179 if (cpu->phys_bits &&
3180 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
3181 cpu->phys_bits < 32)) {
3182 error_setg(errp, "phys-bits should be between 32 and %u "
3184 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
3188 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
3189 error_setg(errp, "TCG only supports phys-bits=%u",
3190 TCG_PHYS_ADDR_BITS);
3194 /* 0 means it was not explicitly set by the user (or by machine
3195 * compat_props or by the host code above). In this case, the default
3196 * is the value used by TCG (40).
3198 if (cpu->phys_bits == 0) {
3199 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
3202 /* For 32 bit systems don't use the user set value, but keep
3203 * phys_bits consistent with what we tell the guest.
3205 if (cpu->phys_bits != 0) {
3206 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
/* Without long mode: 36 bits when PSE36 is present, else 32. */
3210 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
3211 cpu->phys_bits = 36;
3213 cpu->phys_bits = 32;
3216 cpu_exec_init(cs, &error_abort);
3218 if (tcg_enabled()) {
3222 #ifndef CONFIG_USER_ONLY
3223 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
3225 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
3226 x86_cpu_apic_create(cpu, &local_err);
3227 if (local_err != NULL) {
3235 #ifndef CONFIG_USER_ONLY
/* TCG system mode: give each CPU its own address space rooted at
 * cpu_as_root so SMRAM can be overlaid per-CPU later. */
3236 if (tcg_enabled()) {
3237 AddressSpace *newas = g_new(AddressSpace, 1);
3239 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
3240 cpu->cpu_as_root = g_new(MemoryRegion, 1);
3242 /* Outer container... */
3243 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
3244 memory_region_set_enabled(cpu->cpu_as_root, true);
3246 /* ... with two regions inside: normal system memory with low
3249 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
3250 get_system_memory(), 0, ~0ull);
3251 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
3252 memory_region_set_enabled(cpu->cpu_as_mem, true);
3253 address_space_init(newas, cpu->cpu_as_root, "CPU");
3255 cpu_address_space_init(cs, newas, 0);
3257 /* ... SMRAM with higher priority, linked from /machine/smram. */
3258 cpu->machine_done.notify = x86_cpu_machine_done;
3259 qemu_add_machine_init_done_notifier(&cpu->machine_done);
3265 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3266 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3267 * based on inputs (sockets,cores,threads), it is still better to gives
3270 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
3271 * cs->nr_threads hasn't be populated yet and the checking is incorrect.
3273 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3274 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3275 " -smp options properly.");
3279 x86_cpu_apic_realize(cpu, &local_err);
3280 if (local_err != NULL) {
3285 xcc->parent_realize(dev, &local_err);
3288 if (local_err != NULL) {
3289 error_propagate(errp, local_err);
/* DeviceClass::unrealize: tear down what realize created — remove the
 * vCPU thread and reset hook (system mode only) and unparent the APIC
 * child so its reference is dropped. */
3294 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3296 X86CPU *cpu = X86_CPU(dev);
3298 #ifndef CONFIG_USER_ONLY
3299 cpu_remove_sync(CPU(dev));
3300 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3303 if (cpu->apic_state) {
3304 object_unparent(OBJECT(cpu->apic_state));
3305 cpu->apic_state = NULL;
3309 typedef struct BitProperty {
/* QOM getter for a feature-bit property: reports true only when ALL
 * bits of fp->mask are set in the target word (a name registered for
 * several bits reads as the AND of them). */
3314 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3315 void *opaque, Error **errp)
3317 BitProperty *fp = opaque;
3318 bool value = (*fp->ptr & fp->mask) == fp->mask;
3319 visit_type_bool(v, name, &value, errp);
/* QOM setter for a feature-bit property: rejects changes after the
 * device is realized, then sets or clears fp->mask in the target
 * feature word according to the visited bool. */
3322 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3323 void *opaque, Error **errp)
3325 DeviceState *dev = DEVICE(obj);
3326 BitProperty *fp = opaque;
3327 Error *local_err = NULL;
3330 if (dev->realized) {
3331 qdev_prop_set_after_realize(dev, name, errp);
3335 visit_type_bool(v, name, &value, &local_err);
3337 error_propagate(errp, local_err);
3342 *fp->ptr |= fp->mask;
3344 *fp->ptr &= ~fp->mask;
/* QOM release hook: free the BitProperty allocated in
 * x86_cpu_register_bit_prop() (the g_free is on a line missing from
 * this extract). */
3348 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3351 BitProperty *prop = opaque;
3355 /* Register a boolean property to get/set a single bit in a uint32_t field.
3357 * The same property name can be registered multiple times to make it affect
3358 * multiple bits in the same FeatureWord. In that case, the getter will return
3359 * true only if all bits are set.
3361 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3362 const char *prop_name,
3368 uint32_t mask = (1UL << bitnr);
/* If the name already exists, just OR the new bit into the existing
 * BitProperty instead of registering a duplicate. */
3370 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3373 assert(fp->ptr == field);
3376 fp = g_new0(BitProperty, 1);
3379 object_property_add(OBJECT(cpu), prop_name, "bool",
3380 x86_cpu_get_bit_prop,
3381 x86_cpu_set_bit_prop,
3382 x86_cpu_release_bit_prop, fp, &error_abort);
/* Register QOM properties for one feature bit: the canonical name from
 * feature_word_info (a "|"-separated list; first entry is canonical,
 * the rest become aliases) after feat2prop() normalization. */
3386 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3390 Object *obj = OBJECT(cpu);
3393 FeatureWordInfo *fi = &feature_word_info[w];
3395 if (!fi->feat_names[bitnr]) {
3399 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3401 feat2prop(names[0]);
3402 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3404 for (i = 1; names[i]; i++) {
3405 feat2prop(names[i]);
3406 object_property_add_alias(obj, names[i], obj, names[0],
/* QOM instance_init: register all per-instance properties (version
 * fields, vendor, model-id, tsc-frequency, feature-word introspection,
 * and one bool property per feature bit), then load the class's CPU
 * model definition into this instance. */
3413 static void x86_cpu_initfn(Object *obj)
3415 CPUState *cs = CPU(obj);
3416 X86CPU *cpu = X86_CPU(obj);
3417 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3418 CPUX86State *env = &cpu->env;
3423 object_property_add(obj, "family", "int",
3424 x86_cpuid_version_get_family,
3425 x86_cpuid_version_set_family, NULL, NULL, NULL);
3426 object_property_add(obj, "model", "int",
3427 x86_cpuid_version_get_model,
3428 x86_cpuid_version_set_model, NULL, NULL, NULL);
3429 object_property_add(obj, "stepping", "int",
3430 x86_cpuid_version_get_stepping,
3431 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3432 object_property_add_str(obj, "vendor",
3433 x86_cpuid_get_vendor,
3434 x86_cpuid_set_vendor, NULL);
3435 object_property_add_str(obj, "model-id",
3436 x86_cpuid_get_model_id,
3437 x86_cpuid_set_model_id, NULL);
3438 object_property_add(obj, "tsc-frequency", "int",
3439 x86_cpuid_get_tsc_freq,
3440 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
/* Read-only views over the enabled and filtered feature words. */
3441 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3442 x86_cpu_get_feature_words,
3443 NULL, NULL, (void *)env->features, NULL);
3444 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3445 x86_cpu_get_feature_words,
3446 NULL, NULL, (void *)cpu->filtered_features, NULL);
3448 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3450 for (w = 0; w < FEATURE_WORDS; w++) {
3453 for (bitnr = 0; bitnr < 32; bitnr++) {
3454 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3458 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
/* CPUClass::get_arch_id: the architectural CPU identifier is the
 * APIC ID. */
3461 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3463 X86CPU *cpu = X86_CPU(cs);
3465 return cpu->apic_id;
/* CPUClass::get_paging_enabled: paging is on iff CR0.PG is set. */
3468 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3470 X86CPU *cpu = X86_CPU(cs);
3472 return cpu->env.cr[0] & CR0_PG_MASK;
/* CPUClass::set_pc: store the program counter into EIP. */
3475 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3477 X86CPU *cpu = X86_CPU(cs);
3479 cpu->env.eip = value;
/* CPUClass::synchronize_from_tb: recover EIP from the TB's pc, which
 * is stored as cs_base-relative for x86. */
3482 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3484 X86CPU *cpu = X86_CPU(cs);
3486 cpu->env.eip = tb->pc - tb->cs_base;
/* CPUClass::has_work: the vCPU is runnable when a hard/poll interrupt
 * is pending and IF is set, when an always-serviced event (NMI, INIT,
 * SIPI, MCE) is pending, or when SMI is pending outside SMM. */
3489 static bool x86_cpu_has_work(CPUState *cs)
3491 X86CPU *cpu = X86_CPU(cs);
3492 CPUX86State *env = &cpu->env;
3494 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3495 CPU_INTERRUPT_POLL)) &&
3496 (env->eflags & IF_MASK)) ||
3497 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3498 CPU_INTERRUPT_INIT |
3499 CPU_INTERRUPT_SIPI |
3500 CPU_INTERRUPT_MCE)) ||
3501 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3502 !(env->hflags & HF_SMM_MASK));
/* qdev property table for X86CPU.  Topology IDs default to 0 for
 * *-user (no topology) but to unassigned/-1 for system emulation so
 * the board must set them.  UINT32_MAX on level/xlevel/xlevel2 means
 * "not set by the user" and is resolved from the min-* values in
 * x86_cpu_realizefn(). */
3505 static Property x86_cpu_properties[] = {
3506 #ifdef CONFIG_USER_ONLY
3507 /* apic_id = 0 by default for *-user, see commit 9886e834 */
3508 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
3509 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
3510 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
3511 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
3513 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
3514 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
3515 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
3516 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
3518 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3519 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3520 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3521 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3522 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3523 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3524 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3525 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3526 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3527 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3528 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3529 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3530 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3531 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3532 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
3533 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
3534 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
3535 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
3536 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
3537 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
3538 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
3539 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
3540 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
3541 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
3542 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3543 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
3544 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
3545 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
3546 DEFINE_PROP_END_OF_LIST()
/* class_init for the abstract TYPE_X86_CPU base class: hook the qdev
 * realize/unrealize/properties, save-and-override the reset handler,
 * and wire every CPUClass callback to its x86 implementation. */
3549 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3551 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3552 CPUClass *cc = CPU_CLASS(oc);
3553 DeviceClass *dc = DEVICE_CLASS(oc);
/* Keep parent hooks so our implementations can chain to them. */
3555 xcc->parent_realize = dc->realize;
3556 dc->realize = x86_cpu_realizefn;
3557 dc->unrealize = x86_cpu_unrealizefn;
3558 dc->props = x86_cpu_properties;
3560 xcc->parent_reset = cc->reset;
3561 cc->reset = x86_cpu_reset;
3562 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3564 cc->class_by_name = x86_cpu_class_by_name;
3565 cc->parse_features = x86_cpu_parse_featurestr;
3566 cc->has_work = x86_cpu_has_work;
3567 cc->do_interrupt = x86_cpu_do_interrupt;
3568 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3569 cc->dump_state = x86_cpu_dump_state;
3570 cc->set_pc = x86_cpu_set_pc;
3571 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3572 cc->gdb_read_register = x86_cpu_gdb_read_register;
3573 cc->gdb_write_register = x86_cpu_gdb_write_register;
3574 cc->get_arch_id = x86_cpu_get_arch_id;
3575 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3576 #ifdef CONFIG_USER_ONLY
3577 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3579 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3580 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3581 cc->write_elf64_note = x86_cpu_write_elf64_note;
3582 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3583 cc->write_elf32_note = x86_cpu_write_elf32_note;
3584 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3585 cc->vmsd = &vmstate_x86_cpu;
3587 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3588 #ifndef CONFIG_USER_ONLY
3589 cc->debug_excp_handler = breakpoint_handler;
3591 cc->cpu_exec_enter = x86_cpu_exec_enter;
3592 cc->cpu_exec_exit = x86_cpu_exec_exit;
3594 dc->cannot_instantiate_with_device_add_yet = false;
3596 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3597 * object in cpus -> dangling pointer after final object_unref().
3599 dc->cannot_destroy_with_object_finalize_yet = true;
/* QOM TypeInfo for the abstract X86CPU base type; concrete per-model
 * subclasses are registered by x86_register_cpudef_type(). */
3602 static const TypeInfo x86_cpu_type_info = {
3603 .name = TYPE_X86_CPU,
3605 .instance_size = sizeof(X86CPU),
3606 .instance_init = x86_cpu_initfn,
3608 .class_size = sizeof(X86CPUClass),
3609 .class_init = x86_cpu_common_class_init,
/* type_init hook: register the abstract base type, one subclass per
 * built-in model definition, and the KVM-only "host" CPU type. */
3612 static void x86_cpu_register_types(void)
3616 type_register_static(&x86_cpu_type_info);
3617 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3618 x86_register_cpudef_type(&builtin_x86_defs[i]);
3621 type_register_static(&host_x86_cpu_type_info);
3625 type_init(x86_cpu_register_types)