/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "sysemu/cpus.h"
#include "kvm_i386.h"

#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/qmp/qerror.h"

#include "qapi-types.h"
#include "qapi-visit.h"
#include "qapi/visitor.h"
#include "sysemu/arch_init.h"

#if defined(CONFIG_KVM)
#include <linux/kvm_para.h>
#endif

#include "sysemu/sysemu.h"
#include "hw/qdev-properties.h"
#include "hw/i386/topology.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/hw.h"
#include "hw/xen/xen.h"
#include "hw/i386/apic_internal.h"
#endif
53 /* Cache topology CPUID constants: */
55 /* CPUID Leaf 2 Descriptors */
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
60 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
63 /* CPUID Leaf 4 constants: */
66 #define CPUID_4_TYPE_DCACHE 1
67 #define CPUID_4_TYPE_ICACHE 2
68 #define CPUID_4_TYPE_UNIFIED 3
70 #define CPUID_4_LEVEL(l) ((l) << 5)
72 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
73 #define CPUID_4_FULLY_ASSOC (1 << 9)
76 #define CPUID_4_NO_INVD_SHARING (1 << 0)
77 #define CPUID_4_INCLUSIVE (1 << 1)
78 #define CPUID_4_COMPLEX_IDX (1 << 2)
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a ways-of-associativity count to the 4-bit field encoding
 * defined by the AMD CPUID specification (0xF == fully associative).
 */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* Level 3 unified cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
#define L3_N_LINE_SIZE        64
#define L3_N_ASSOCIATIVITY    16
#define L3_N_SETS          16384
#define L3_N_PARTITIONS        1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 16MiB */
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG     1
#define L3_N_SIZE_KB_AMD   16384
148 /* TLB definitions: */
150 #define L1_DTLB_2M_ASSOC 1
151 #define L1_DTLB_2M_ENTRIES 255
152 #define L1_DTLB_4K_ASSOC 1
153 #define L1_DTLB_4K_ENTRIES 255
155 #define L1_ITLB_2M_ASSOC 1
156 #define L1_ITLB_2M_ENTRIES 255
157 #define L1_ITLB_4K_ASSOC 1
158 #define L1_ITLB_4K_ENTRIES 255
160 #define L2_DTLB_2M_ASSOC 0 /* disabled */
161 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
162 #define L2_DTLB_4K_ASSOC 4
163 #define L2_DTLB_4K_ENTRIES 512
165 #define L2_ITLB_2M_ASSOC 0 /* disabled */
166 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
167 #define L2_ITLB_4K_ASSOC 4
168 #define L2_ITLB_4K_ENTRIES 512
172 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
173 uint32_t vendor2, uint32_t vendor3)
176 for (i = 0; i < 4; i++) {
177 dst[i] = vendor1 >> (8 * i);
178 dst[i + 4] = vendor2 >> (8 * i);
179 dst[i + 8] = vendor3 >> (8 * i);
181 dst[CPUID_VENDOR_SZ] = '\0';
184 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
185 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
186 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
187 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
188 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
189 CPUID_PSE36 | CPUID_FXSR)
190 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
191 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
192 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
193 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
194 CPUID_PAE | CPUID_SEP | CPUID_APIC)
196 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
197 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
198 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
199 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
200 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
/* partly implemented:
   CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
/* missing:
   CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
/* missing:
   CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
   CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
   CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
   CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
   CPUID_EXT_F16C, CPUID_EXT_RDRAND */

/* 64-bit-only EXT2 bits are advertised only when the target is x86_64 */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
/* missing:
   CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
   CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
   CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
/* missing:
   CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Static description of one CPUID feature word: how to query it and
 * which of its bits QEMU knows about. */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;
264 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
267 "fpu", "vme", "de", "pse",
268 "tsc", "msr", "pae", "mce",
269 "cx8", "apic", NULL, "sep",
270 "mtrr", "pge", "mca", "cmov",
271 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
272 NULL, "ds" /* Intel dts */, "acpi", "mmx",
273 "fxsr", "sse", "sse2", "ss",
274 "ht" /* Intel htt */, "tm", "ia64", "pbe",
276 .cpuid_eax = 1, .cpuid_reg = R_EDX,
277 .tcg_features = TCG_FEATURES,
281 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
282 "ds_cpl", "vmx", "smx", "est",
283 "tm2", "ssse3", "cid", NULL,
284 "fma", "cx16", "xtpr", "pdcm",
285 NULL, "pcid", "dca", "sse4.1|sse4_1",
286 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
287 "tsc-deadline", "aes", "xsave", "osxsave",
288 "avx", "f16c", "rdrand", "hypervisor",
290 .cpuid_eax = 1, .cpuid_reg = R_ECX,
291 .tcg_features = TCG_EXT_FEATURES,
293 /* Feature names that are already defined on feature_name[] but
294 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
295 * names on feat_names below. They are copied automatically
296 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
298 [FEAT_8000_0001_EDX] = {
300 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
301 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
302 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
303 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
304 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
305 "nx|xd", NULL, "mmxext", NULL /* mmx */,
306 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb", "rdtscp",
307 NULL, "lm|i64", "3dnowext", "3dnow",
309 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
310 .tcg_features = TCG_EXT2_FEATURES,
312 [FEAT_8000_0001_ECX] = {
314 "lahf_lm", "cmp_legacy", "svm", "extapic",
315 "cr8legacy", "abm", "sse4a", "misalignsse",
316 "3dnowprefetch", "osvw", "ibs", "xop",
317 "skinit", "wdt", NULL, "lwp",
318 "fma4", "tce", NULL, "nodeid_msr",
319 NULL, "tbm", "topoext", "perfctr_core",
320 "perfctr_nb", NULL, NULL, NULL,
321 NULL, NULL, NULL, NULL,
323 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
324 .tcg_features = TCG_EXT3_FEATURES,
326 [FEAT_C000_0001_EDX] = {
328 NULL, NULL, "xstore", "xstore-en",
329 NULL, NULL, "xcrypt", "xcrypt-en",
330 "ace2", "ace2-en", "phe", "phe-en",
331 "pmm", "pmm-en", NULL, NULL,
332 NULL, NULL, NULL, NULL,
333 NULL, NULL, NULL, NULL,
334 NULL, NULL, NULL, NULL,
335 NULL, NULL, NULL, NULL,
337 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
338 .tcg_features = TCG_EXT4_FEATURES,
342 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
343 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
344 NULL, NULL, NULL, NULL,
345 NULL, NULL, NULL, NULL,
346 NULL, NULL, NULL, NULL,
347 NULL, NULL, NULL, NULL,
348 "kvmclock-stable-bit", NULL, NULL, NULL,
349 NULL, NULL, NULL, NULL,
351 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
352 .tcg_features = TCG_KVM_FEATURES,
354 [FEAT_HYPERV_EAX] = {
356 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
357 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
358 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
359 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
360 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
361 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
362 NULL, NULL, NULL, NULL,
363 NULL, NULL, NULL, NULL,
364 NULL, NULL, NULL, NULL,
365 NULL, NULL, NULL, NULL,
366 NULL, NULL, NULL, NULL,
368 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
370 [FEAT_HYPERV_EBX] = {
372 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
373 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
374 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
375 NULL /* hv_create_port */, NULL /* hv_connect_port */,
376 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
377 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
379 NULL, NULL, NULL, NULL,
380 NULL, NULL, NULL, NULL,
381 NULL, NULL, NULL, NULL,
382 NULL, NULL, NULL, NULL,
384 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
386 [FEAT_HYPERV_EDX] = {
388 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
389 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
390 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
392 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
393 NULL, NULL, NULL, NULL,
394 NULL, NULL, NULL, NULL,
395 NULL, NULL, NULL, NULL,
396 NULL, NULL, NULL, NULL,
397 NULL, NULL, NULL, NULL,
399 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
403 "npt", "lbrv", "svm_lock", "nrip_save",
404 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
405 NULL, NULL, "pause_filter", NULL,
406 "pfthreshold", NULL, NULL, NULL,
407 NULL, NULL, NULL, NULL,
408 NULL, NULL, NULL, NULL,
409 NULL, NULL, NULL, NULL,
410 NULL, NULL, NULL, NULL,
412 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
413 .tcg_features = TCG_SVM_FEATURES,
417 "fsgsbase", "tsc_adjust", NULL, "bmi1",
418 "hle", "avx2", NULL, "smep",
419 "bmi2", "erms", "invpcid", "rtm",
420 NULL, NULL, "mpx", NULL,
421 "avx512f", "avx512dq", "rdseed", "adx",
422 "smap", "avx512ifma", "pcommit", "clflushopt",
423 "clwb", NULL, "avx512pf", "avx512er",
424 "avx512cd", NULL, "avx512bw", "avx512vl",
427 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
429 .tcg_features = TCG_7_0_EBX_FEATURES,
433 NULL, "avx512vbmi", "umip", "pku",
434 "ospke", NULL, NULL, NULL,
435 NULL, NULL, NULL, NULL,
436 NULL, NULL, NULL, NULL,
437 NULL, NULL, NULL, NULL,
438 NULL, NULL, "rdpid", NULL,
439 NULL, NULL, NULL, NULL,
440 NULL, NULL, NULL, NULL,
443 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
445 .tcg_features = TCG_7_0_ECX_FEATURES,
447 [FEAT_8000_0007_EDX] = {
449 NULL, NULL, NULL, NULL,
450 NULL, NULL, NULL, NULL,
451 "invtsc", NULL, NULL, NULL,
452 NULL, NULL, NULL, NULL,
453 NULL, NULL, NULL, NULL,
454 NULL, NULL, NULL, NULL,
455 NULL, NULL, NULL, NULL,
456 NULL, NULL, NULL, NULL,
458 .cpuid_eax = 0x80000007,
460 .tcg_features = TCG_APM_FEATURES,
461 .unmigratable_flags = CPUID_APM_INVTSC,
465 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
466 NULL, NULL, NULL, NULL,
467 NULL, NULL, NULL, NULL,
468 NULL, NULL, NULL, NULL,
469 NULL, NULL, NULL, NULL,
470 NULL, NULL, NULL, NULL,
471 NULL, NULL, NULL, NULL,
472 NULL, NULL, NULL, NULL,
475 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
477 .tcg_features = TCG_XSAVE_FEATURES,
481 NULL, NULL, "arat", NULL,
482 NULL, NULL, NULL, NULL,
483 NULL, NULL, NULL, NULL,
484 NULL, NULL, NULL, NULL,
485 NULL, NULL, NULL, NULL,
486 NULL, NULL, NULL, NULL,
487 NULL, NULL, NULL, NULL,
488 NULL, NULL, NULL, NULL,
490 .cpuid_eax = 6, .cpuid_reg = R_EAX,
491 .tcg_features = TCG_6_EAX_FEATURES,
493 [FEAT_XSAVE_COMP_LO] = {
495 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
498 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
499 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
500 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
503 [FEAT_XSAVE_COMP_HI] = {
505 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
511 typedef struct X86RegisterInfo32 {
512 /* Name of register */
514 /* QAPI enum value register */
515 X86CPURegister32 qapi_enum;
518 #define REGISTER(reg) \
519 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
520 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
/* Description of one XSAVE state component: the feature word/bit that
 * enables it, and its offset and size inside the XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;
537 static const ExtSaveArea x86_ext_save_areas[] = {
539 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
540 .offset = offsetof(X86XSaveArea, avx_state),
541 .size = sizeof(XSaveAVX) },
542 [XSTATE_BNDREGS_BIT] =
543 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
544 .offset = offsetof(X86XSaveArea, bndreg_state),
545 .size = sizeof(XSaveBNDREG) },
546 [XSTATE_BNDCSR_BIT] =
547 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
548 .offset = offsetof(X86XSaveArea, bndcsr_state),
549 .size = sizeof(XSaveBNDCSR) },
550 [XSTATE_OPMASK_BIT] =
551 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
552 .offset = offsetof(X86XSaveArea, opmask_state),
553 .size = sizeof(XSaveOpmask) },
554 [XSTATE_ZMM_Hi256_BIT] =
555 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
556 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
557 .size = sizeof(XSaveZMM_Hi256) },
558 [XSTATE_Hi16_ZMM_BIT] =
559 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
560 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
561 .size = sizeof(XSaveHi16_ZMM) },
563 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
564 .offset = offsetof(X86XSaveArea, pkru_state),
565 .size = sizeof(XSavePKRU) },
568 static uint32_t xsave_area_size(uint64_t mask)
571 uint64_t ret = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader);
573 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
574 const ExtSaveArea *esa = &x86_ext_save_areas[i];
575 if ((mask >> i) & 1) {
576 ret = MAX(ret, esa->offset + esa->size);
582 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
584 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
585 cpu->env.features[FEAT_XSAVE_COMP_LO];
588 const char *get_register_name_32(unsigned int reg)
590 if (reg >= CPU_NB_REGS32) {
593 return x86_reg_info_32[reg].name;
597 * Returns the set of feature flags that are supported and migratable by
598 * QEMU, for a given FeatureWord.
600 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
602 FeatureWordInfo *wi = &feature_word_info[w];
606 for (i = 0; i < 32; i++) {
607 uint32_t f = 1U << i;
609 /* If the feature name is known, it is implicitly considered migratable,
610 * unless it is explicitly set in unmigratable_flags */
611 if ((wi->migratable_flags & f) ||
612 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
619 void host_cpuid(uint32_t function, uint32_t count,
620 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
626 : "=a"(vec[0]), "=b"(vec[1]),
627 "=c"(vec[2]), "=d"(vec[3])
628 : "0"(function), "c"(count) : "cc");
629 #elif defined(__i386__)
630 asm volatile("pusha \n\t"
632 "mov %%eax, 0(%2) \n\t"
633 "mov %%ebx, 4(%2) \n\t"
634 "mov %%ecx, 8(%2) \n\t"
635 "mov %%edx, 12(%2) \n\t"
637 : : "a"(function), "c"(count), "S"(vec)
/* true for any non-NUL char that is not ASCII-printable
 * (control chars, space, and anything above '~') */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))

/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
 * a substring. ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    for (;;) {
        if (!*s1 || !*s2 || *s1 != *s2) {
            return *s1 - *s2;
        }
        ++s1, ++s2;
        if (s1 == e1 && s2 == e2) {
            return 0;
        }
    }
}
/* compare *[s..e) to *altstr.  *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right.  Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *p, *q;

    for (q = p = altstr; ; ) {
        /* advance p to the end of the current alternative */
        while (*p && *p != '|') {
            p++;
        }
        /* empty alternative matches empty input; otherwise compare */
        if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p))) {
            return 0;
        }
        if (!*p) {
            return 1;
        } else {
            q = ++p;
        }
    }
}
/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    uint32_t mask;
    const char **ppc;
    bool found = false;

    /* walk all 32 bit positions; mask overflows to 0 after bit 31 */
    for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
        if (*ppc && !altcmp(s, e, *ppc)) {
            *pval |= mask;
            found = true;
        }
    }
    return found;
}
716 static void add_flagname_to_bitmaps(const char *flagname,
717 FeatureWordArray words,
721 for (w = 0; w < FEATURE_WORDS; w++) {
722 FeatureWordInfo *wi = &feature_word_info[w];
723 if (lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
727 if (w == FEATURE_WORDS) {
728 error_setg(errp, "CPU feature %s not found", flagname);
732 /* CPU class name definitions: */
734 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
735 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
737 /* Return type name for a given CPU model name
738 * Caller is responsible for freeing the returned string.
740 static char *x86_cpu_type_name(const char *model_name)
742 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
745 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
750 if (cpu_model == NULL) {
754 typename = x86_cpu_type_name(cpu_model);
755 oc = object_class_by_name(typename);
760 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
762 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
763 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
764 return g_strndup(class_name,
765 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
768 struct X86CPUDefinition {
772 /* vendor is zero-terminated, 12 character ASCII string */
773 char vendor[CPUID_VENDOR_SZ + 1];
777 FeatureWordArray features;
781 static X86CPUDefinition builtin_x86_defs[] = {
785 .vendor = CPUID_VENDOR_AMD,
789 .features[FEAT_1_EDX] =
791 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
793 .features[FEAT_1_ECX] =
794 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
795 .features[FEAT_8000_0001_EDX] =
796 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
797 .features[FEAT_8000_0001_ECX] =
798 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
799 .xlevel = 0x8000000A,
800 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
805 .vendor = CPUID_VENDOR_AMD,
809 /* Missing: CPUID_HT */
810 .features[FEAT_1_EDX] =
812 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
813 CPUID_PSE36 | CPUID_VME,
814 .features[FEAT_1_ECX] =
815 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
817 .features[FEAT_8000_0001_EDX] =
818 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
819 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
820 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
821 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
823 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
824 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
825 .features[FEAT_8000_0001_ECX] =
826 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
827 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
828 /* Missing: CPUID_SVM_LBRV */
829 .features[FEAT_SVM] =
831 .xlevel = 0x8000001A,
832 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
837 .vendor = CPUID_VENDOR_INTEL,
841 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
842 .features[FEAT_1_EDX] =
844 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
845 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
846 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
847 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
848 .features[FEAT_1_ECX] =
849 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
851 .features[FEAT_8000_0001_EDX] =
852 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
853 .features[FEAT_8000_0001_ECX] =
855 .xlevel = 0x80000008,
856 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
861 .vendor = CPUID_VENDOR_INTEL,
865 /* Missing: CPUID_HT */
866 .features[FEAT_1_EDX] =
867 PPRO_FEATURES | CPUID_VME |
868 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
870 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
871 .features[FEAT_1_ECX] =
872 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
873 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
874 .features[FEAT_8000_0001_EDX] =
875 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
876 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
877 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
878 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
879 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
880 .features[FEAT_8000_0001_ECX] =
882 .xlevel = 0x80000008,
883 .model_id = "Common KVM processor"
888 .vendor = CPUID_VENDOR_INTEL,
892 .features[FEAT_1_EDX] =
894 .features[FEAT_1_ECX] =
896 .xlevel = 0x80000004,
897 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
902 .vendor = CPUID_VENDOR_INTEL,
906 .features[FEAT_1_EDX] =
907 PPRO_FEATURES | CPUID_VME |
908 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
909 .features[FEAT_1_ECX] =
911 .features[FEAT_8000_0001_ECX] =
913 .xlevel = 0x80000008,
914 .model_id = "Common 32-bit KVM processor"
919 .vendor = CPUID_VENDOR_INTEL,
923 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
924 .features[FEAT_1_EDX] =
925 PPRO_FEATURES | CPUID_VME |
926 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
928 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
929 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
930 .features[FEAT_1_ECX] =
931 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
932 .features[FEAT_8000_0001_EDX] =
934 .xlevel = 0x80000008,
935 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
940 .vendor = CPUID_VENDOR_INTEL,
944 .features[FEAT_1_EDX] =
951 .vendor = CPUID_VENDOR_INTEL,
955 .features[FEAT_1_EDX] =
962 .vendor = CPUID_VENDOR_INTEL,
966 .features[FEAT_1_EDX] =
973 .vendor = CPUID_VENDOR_INTEL,
977 .features[FEAT_1_EDX] =
984 .vendor = CPUID_VENDOR_AMD,
988 .features[FEAT_1_EDX] =
989 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
991 .features[FEAT_8000_0001_EDX] =
992 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
993 .xlevel = 0x80000008,
994 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
999 .vendor = CPUID_VENDOR_INTEL,
1003 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1004 .features[FEAT_1_EDX] =
1006 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1007 CPUID_ACPI | CPUID_SS,
1008 /* Some CPUs got no CPUID_SEP */
1009 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1011 .features[FEAT_1_ECX] =
1012 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1014 .features[FEAT_8000_0001_EDX] =
1016 .features[FEAT_8000_0001_ECX] =
1018 .xlevel = 0x80000008,
1019 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1024 .vendor = CPUID_VENDOR_INTEL,
1028 .features[FEAT_1_EDX] =
1029 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1030 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1031 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1032 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1033 CPUID_DE | CPUID_FP87,
1034 .features[FEAT_1_ECX] =
1035 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1036 .features[FEAT_8000_0001_EDX] =
1037 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1038 .features[FEAT_8000_0001_ECX] =
1040 .xlevel = 0x80000008,
1041 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1046 .vendor = CPUID_VENDOR_INTEL,
1050 .features[FEAT_1_EDX] =
1051 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1052 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1053 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1054 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1055 CPUID_DE | CPUID_FP87,
1056 .features[FEAT_1_ECX] =
1057 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1059 .features[FEAT_8000_0001_EDX] =
1060 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1061 .features[FEAT_8000_0001_ECX] =
1063 .xlevel = 0x80000008,
1064 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1069 .vendor = CPUID_VENDOR_INTEL,
1073 .features[FEAT_1_EDX] =
1074 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1075 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1076 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1077 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1078 CPUID_DE | CPUID_FP87,
1079 .features[FEAT_1_ECX] =
1080 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1081 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1082 .features[FEAT_8000_0001_EDX] =
1083 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1084 .features[FEAT_8000_0001_ECX] =
1086 .xlevel = 0x80000008,
1087 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1092 .vendor = CPUID_VENDOR_INTEL,
1096 .features[FEAT_1_EDX] =
1097 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1098 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1099 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1100 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1101 CPUID_DE | CPUID_FP87,
1102 .features[FEAT_1_ECX] =
1103 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1104 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1105 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1106 .features[FEAT_8000_0001_EDX] =
1107 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1108 .features[FEAT_8000_0001_ECX] =
1110 .features[FEAT_6_EAX] =
1112 .xlevel = 0x80000008,
1113 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1116 .name = "SandyBridge",
1118 .vendor = CPUID_VENDOR_INTEL,
1122 .features[FEAT_1_EDX] =
1123 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1124 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1125 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1126 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1127 CPUID_DE | CPUID_FP87,
1128 .features[FEAT_1_ECX] =
1129 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1130 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1131 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1132 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1134 .features[FEAT_8000_0001_EDX] =
1135 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1137 .features[FEAT_8000_0001_ECX] =
1139 .features[FEAT_XSAVE] =
1140 CPUID_XSAVE_XSAVEOPT,
1141 .features[FEAT_6_EAX] =
1143 .xlevel = 0x80000008,
1144 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1147 .name = "IvyBridge",
1149 .vendor = CPUID_VENDOR_INTEL,
1153 .features[FEAT_1_EDX] =
1154 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1155 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1156 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1157 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1158 CPUID_DE | CPUID_FP87,
1159 .features[FEAT_1_ECX] =
1160 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1161 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1162 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1163 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1164 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1165 .features[FEAT_7_0_EBX] =
1166 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1168 .features[FEAT_8000_0001_EDX] =
1169 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1171 .features[FEAT_8000_0001_ECX] =
1173 .features[FEAT_XSAVE] =
1174 CPUID_XSAVE_XSAVEOPT,
1175 .features[FEAT_6_EAX] =
1177 .xlevel = 0x80000008,
1178 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1181 .name = "Haswell-noTSX",
1183 .vendor = CPUID_VENDOR_INTEL,
1187 .features[FEAT_1_EDX] =
1188 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1189 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1190 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1191 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1192 CPUID_DE | CPUID_FP87,
1193 .features[FEAT_1_ECX] =
1194 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1195 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1196 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1197 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1198 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1199 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1200 .features[FEAT_8000_0001_EDX] =
1201 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1203 .features[FEAT_8000_0001_ECX] =
1204 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1205 .features[FEAT_7_0_EBX] =
1206 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1207 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1208 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1209 .features[FEAT_XSAVE] =
1210 CPUID_XSAVE_XSAVEOPT,
1211 .features[FEAT_6_EAX] =
1213 .xlevel = 0x80000008,
1214 .model_id = "Intel Core Processor (Haswell, no TSX)",
1218 .vendor = CPUID_VENDOR_INTEL,
1222 .features[FEAT_1_EDX] =
1223 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1224 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1225 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1226 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1227 CPUID_DE | CPUID_FP87,
1228 .features[FEAT_1_ECX] =
1229 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1230 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1231 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1232 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1233 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1234 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1235 .features[FEAT_8000_0001_EDX] =
1236 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1238 .features[FEAT_8000_0001_ECX] =
1239 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1240 .features[FEAT_7_0_EBX] =
1241 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1242 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1243 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1245 .features[FEAT_XSAVE] =
1246 CPUID_XSAVE_XSAVEOPT,
1247 .features[FEAT_6_EAX] =
1249 .xlevel = 0x80000008,
1250 .model_id = "Intel Core Processor (Haswell)",
1253 .name = "Broadwell-noTSX",
1255 .vendor = CPUID_VENDOR_INTEL,
1259 .features[FEAT_1_EDX] =
1260 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1261 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1262 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1263 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1264 CPUID_DE | CPUID_FP87,
1265 .features[FEAT_1_ECX] =
1266 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1267 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1268 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1269 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1270 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1271 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1272 .features[FEAT_8000_0001_EDX] =
1273 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1275 .features[FEAT_8000_0001_ECX] =
1276 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1277 .features[FEAT_7_0_EBX] =
1278 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1279 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1280 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1281 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1283 .features[FEAT_XSAVE] =
1284 CPUID_XSAVE_XSAVEOPT,
1285 .features[FEAT_6_EAX] =
1287 .xlevel = 0x80000008,
1288 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1291 .name = "Broadwell",
1293 .vendor = CPUID_VENDOR_INTEL,
1297 .features[FEAT_1_EDX] =
1298 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1299 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1300 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1301 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1302 CPUID_DE | CPUID_FP87,
1303 .features[FEAT_1_ECX] =
1304 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1305 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1306 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1307 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1308 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1309 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1310 .features[FEAT_8000_0001_EDX] =
1311 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1313 .features[FEAT_8000_0001_ECX] =
1314 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1315 .features[FEAT_7_0_EBX] =
1316 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1317 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1318 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1319 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1321 .features[FEAT_XSAVE] =
1322 CPUID_XSAVE_XSAVEOPT,
1323 .features[FEAT_6_EAX] =
1325 .xlevel = 0x80000008,
1326 .model_id = "Intel Core Processor (Broadwell)",
1329 .name = "Skylake-Client",
1331 .vendor = CPUID_VENDOR_INTEL,
1335 .features[FEAT_1_EDX] =
1336 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1337 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1338 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1339 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1340 CPUID_DE | CPUID_FP87,
1341 .features[FEAT_1_ECX] =
1342 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1343 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1344 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1345 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1346 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1347 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1348 .features[FEAT_8000_0001_EDX] =
1349 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1351 .features[FEAT_8000_0001_ECX] =
1352 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1353 .features[FEAT_7_0_EBX] =
1354 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1355 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1356 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1357 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1358 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1359 /* Missing: XSAVES (not supported by some Linux versions,
1360 * including v4.1 to v4.6).
1361 * KVM doesn't yet expose any XSAVES state save component,
1362 * and the only one defined in Skylake (processor tracing)
1363 * probably will block migration anyway.
1365 .features[FEAT_XSAVE] =
1366 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1367 CPUID_XSAVE_XGETBV1,
1368 .features[FEAT_6_EAX] =
1370 .xlevel = 0x80000008,
1371 .model_id = "Intel Core Processor (Skylake)",
1374 .name = "Opteron_G1",
1376 .vendor = CPUID_VENDOR_AMD,
1380 .features[FEAT_1_EDX] =
1381 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1382 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1383 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1384 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1385 CPUID_DE | CPUID_FP87,
1386 .features[FEAT_1_ECX] =
1388 .features[FEAT_8000_0001_EDX] =
1389 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1390 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1391 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1392 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1393 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1394 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1395 .xlevel = 0x80000008,
1396 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1399 .name = "Opteron_G2",
1401 .vendor = CPUID_VENDOR_AMD,
1405 .features[FEAT_1_EDX] =
1406 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1407 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1408 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1409 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1410 CPUID_DE | CPUID_FP87,
1411 .features[FEAT_1_ECX] =
1412 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1413 /* Missing: CPUID_EXT2_RDTSCP */
1414 .features[FEAT_8000_0001_EDX] =
1415 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1416 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1417 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1418 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1419 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1420 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1421 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1422 .features[FEAT_8000_0001_ECX] =
1423 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1424 .xlevel = 0x80000008,
1425 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1428 .name = "Opteron_G3",
1430 .vendor = CPUID_VENDOR_AMD,
1434 .features[FEAT_1_EDX] =
1435 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1436 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1437 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1438 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1439 CPUID_DE | CPUID_FP87,
1440 .features[FEAT_1_ECX] =
1441 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1443 /* Missing: CPUID_EXT2_RDTSCP */
1444 .features[FEAT_8000_0001_EDX] =
1445 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1446 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1447 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1448 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1449 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1450 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1451 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1452 .features[FEAT_8000_0001_ECX] =
1453 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1454 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1455 .xlevel = 0x80000008,
1456 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1459 .name = "Opteron_G4",
1461 .vendor = CPUID_VENDOR_AMD,
1465 .features[FEAT_1_EDX] =
1466 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1467 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1468 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1469 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1470 CPUID_DE | CPUID_FP87,
1471 .features[FEAT_1_ECX] =
1472 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1473 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1474 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1476 /* Missing: CPUID_EXT2_RDTSCP */
1477 .features[FEAT_8000_0001_EDX] =
1479 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1480 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1481 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1482 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1483 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1484 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1485 .features[FEAT_8000_0001_ECX] =
1486 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1487 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1488 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1491 .xlevel = 0x8000001A,
1492 .model_id = "AMD Opteron 62xx class CPU",
1495 .name = "Opteron_G5",
1497 .vendor = CPUID_VENDOR_AMD,
1501 .features[FEAT_1_EDX] =
1502 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1503 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1504 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1505 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1506 CPUID_DE | CPUID_FP87,
1507 .features[FEAT_1_ECX] =
1508 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1509 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1510 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1511 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1512 /* Missing: CPUID_EXT2_RDTSCP */
1513 .features[FEAT_8000_0001_EDX] =
1515 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1516 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1517 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1518 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1519 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1520 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1521 .features[FEAT_8000_0001_ECX] =
1522 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1523 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1524 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1527 .xlevel = 0x8000001A,
1528 .model_id = "AMD Opteron 63xx class CPU",
/* Name/value pair for a qdev property default; tables of these are
 * terminated by a NULL ->prop entry (see x86_cpu_apply_props()). */
1532 typedef struct PropValue {
1533     const char *prop, *value;
1536 /* KVM-specific features that are automatically added/removed
1537  * from all CPU models when KVM is enabled.
1539 static PropValue kvm_default_props[] = {
1540     { "kvmclock", "on" },
1541     { "kvm-nopiodelay", "on" },
1542     { "kvm-asyncpf", "on" },
1543     { "kvm-steal-time", "on" },
1544     { "kvm-pv-eoi", "on" },
1545     { "kvmclock-stable-bit", "on" },
/* "monitor" (MWAIT/MONITOR) is deliberately forced off under KVM. */
1548     { "monitor", "off" },
/* Override the default value of one entry in kvm_default_props.
 * Only valid for properties already present in the table (searched by
 * strcmp below); NOTE(review): surrounding assertion lines appear to be
 * elided from this chunk — confirm against the full file. */
1553 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1556     for (pv = kvm_default_props; pv->prop; pv++) {
1557         if (!strcmp(pv->prop, prop)) {
1563     /* It is valid to call this function only for properties that
1564      * are already present in the kvm_default_props table.
1569 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1570                                                    bool migratable_only);
/* Query KVM for LMCE (Local Machine Check Exception) support via the
 * MCE capability ioctl; returns false if the ioctl fails. */
1574 static bool lmce_supported(void)
1578     if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1582     return !!(mce_cap & MCG_LMCE_P);
/* Fill str with the host CPU brand string read from CPUID leaves
 * 0x80000002..0x80000004 (3 leaves x 16 bytes = 48 bytes).
 * NOTE(review): caller must supply a buffer of at least 48 bytes. */
1585 static int cpu_x86_fill_model_id(char *str)
1587     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1590     for (i = 0; i < 3; i++) {
1591         host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1592         memcpy(str + i * 16 + 0, &eax, 4);
1593         memcpy(str + i * 16 + 4, &ebx, 4);
1594         memcpy(str + i * 16 + 8, &ecx, 4);
1595         memcpy(str + i * 16 + 12, &edx, 4);
/* Definition for the "host" CPU model; filled in by host_x86_cpu_class_init(). */
1600 static X86CPUDefinition host_cpudef;
/* qdev properties specific to the "host" CPU model. */
1602 static Property host_x86_cpu_properties[] = {
1603     DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1604     DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1605     DEFINE_PROP_END_OF_LIST()
1608 /* class_init for the "host" CPU model
1610  * This function may be called before KVM is initialized.
1612 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1614     DeviceClass *dc = DEVICE_CLASS(oc);
1615     X86CPUClass *xcc = X86_CPU_CLASS(oc);
1616     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
/* The "host" model is meaningless without KVM. */
1618     xcc->kvm_required = true;
/* Leaf 0: vendor string comes back in EBX/EDX/ECX order. */
1620     host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1621     x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
/* Leaf 1 EAX: family = base(11:8) + extended(27:20);
 * model = base(7:4) | extended(19:16) << 4; stepping = bits 3:0. */
1623     host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1624     host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1625     host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1626     host_cpudef.stepping = eax & 0x0F;
1628     cpu_x86_fill_model_id(host_cpudef.model_id);
1630     xcc->cpu_def = &host_cpudef;
1632     /* level, xlevel, xlevel2, and the feature words are initialized on
1633      * instance_init, because they require KVM to be initialized.
1636     dc->props = host_x86_cpu_properties;
1637     /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1638     dc->cannot_destroy_with_object_finalize_yet = true;
/* instance_init for the "host" CPU model: pull the minimum CPUID levels
 * (basic, extended, Centaur) from KVM and enable lmce/pmu where supported. */
1641 static void host_x86_cpu_initfn(Object *obj)
1643     X86CPU *cpu = X86_CPU(obj);
1644     CPUX86State *env = &cpu->env;
1645     KVMState *s = kvm_state;
1647     /* We can't fill the features array here because we don't know yet if
1648      * "migratable" is true or false.
1650     cpu->host_features = true;
1652     /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
1653     if (kvm_enabled()) {
1654         env->cpuid_min_level =
1655             kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1656         env->cpuid_min_xlevel =
1657             kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1658         env->cpuid_min_xlevel2 =
1659             kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1661         if (lmce_supported()) {
1662             object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
1666     object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
/* QOM type registration for the "host" CPU model. */
1669 static const TypeInfo host_x86_cpu_type_info = {
1670     .name = X86_CPU_TYPE_NAME("host"),
1671     .parent = TYPE_X86_CPU,
1672     .instance_init = host_x86_cpu_initfn,
1673     .class_init = host_x86_cpu_class_init,
/* Print a warning to stderr for every bit set in mask that names a feature
 * in word w the accelerator (host/KVM or TCG) cannot provide. */
1678 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1680     FeatureWordInfo *f = &feature_word_info[w];
1683     for (i = 0; i < 32; ++i) {
1684         if ((1UL << i) & mask) {
1685             const char *reg = get_register_name_32(f->cpuid_reg);
1687             fprintf(stderr, "warning: %s doesn't support requested feature: "
1688                     "CPUID.%02XH:%s%s%s [bit %d]\n",
1689                     kvm_enabled() ? "host" : "TCG",
/* Unnamed bits are still reported, just without a feature name. */
1691                     f->feat_names[i] ? "." : "",
1692                     f->feat_names[i] ? f->feat_names[i] : "", i);
/* QOM getter for "family": base family (CPUID[1].EAX bits 11:8) plus
 * extended family (bits 27:20). */
1697 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1698                                          const char *name, void *opaque,
1701     X86CPU *cpu = X86_CPU(obj);
1702     CPUX86State *env = &cpu->env;
1705     value = (env->cpuid_version >> 8) & 0xf;
1707     value += (env->cpuid_version >> 20) & 0xff;
1709     visit_type_int(v, name, &value, errp);
/* QOM setter for "family": families > 0x0f are encoded with base family
 * 0xf and the remainder in the extended-family field (bits 27:20). */
1712 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1713                                          const char *name, void *opaque,
1716     X86CPU *cpu = X86_CPU(obj);
1717     CPUX86State *env = &cpu->env;
1718     const int64_t min = 0;
1719     const int64_t max = 0xff + 0xf;
1720     Error *local_err = NULL;
1723     visit_type_int(v, name, &value, &local_err);
1725         error_propagate(errp, local_err);
1728     if (value < min || value > max) {
1729         error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1730                    name ? name : "null", value, min, max);
/* Clear both family fields before re-encoding. */
1734     env->cpuid_version &= ~0xff00f00;
1736         env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1738         env->cpuid_version |= value << 8;
/* QOM getter for "model": base model (CPUID[1].EAX bits 7:4) combined with
 * extended model (bits 19:16) as the high nibble. */
1742 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1743                                         const char *name, void *opaque,
1746     X86CPU *cpu = X86_CPU(obj);
1747     CPUX86State *env = &cpu->env;
1750     value = (env->cpuid_version >> 4) & 0xf;
1751     value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1752     visit_type_int(v, name, &value, errp);
/* QOM setter for "model": splits the 8-bit value into base (bits 7:4 of
 * cpuid_version) and extended (bits 19:16) model fields. */
1755 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1756                                         const char *name, void *opaque,
1759     X86CPU *cpu = X86_CPU(obj);
1760     CPUX86State *env = &cpu->env;
1761     const int64_t min = 0;
1762     const int64_t max = 0xff;
1763     Error *local_err = NULL;
1766     visit_type_int(v, name, &value, &local_err);
1768         error_propagate(errp, local_err);
1771     if (value < min || value > max) {
1772         error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1773                    name ? name : "null", value, min, max);
/* Clear both model fields before re-encoding. */
1777     env->cpuid_version &= ~0xf00f0;
1778     env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
/* QOM getter for "stepping": CPUID[1].EAX bits 3:0. */
1781 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1782                                            const char *name, void *opaque,
1785     X86CPU *cpu = X86_CPU(obj);
1786     CPUX86State *env = &cpu->env;
1789     value = env->cpuid_version & 0xf;
1790     visit_type_int(v, name, &value, errp);
/* QOM setter for "stepping": accepts 0..0xf and stores it in the low
 * nibble of cpuid_version. */
1793 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1794                                            const char *name, void *opaque,
1797     X86CPU *cpu = X86_CPU(obj);
1798     CPUX86State *env = &cpu->env;
1799     const int64_t min = 0;
1800     const int64_t max = 0xf;
1801     Error *local_err = NULL;
1804     visit_type_int(v, name, &value, &local_err);
1806         error_propagate(errp, local_err);
1809     if (value < min || value > max) {
1810         error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1811                    name ? name : "null", value, min, max);
1815     env->cpuid_version &= ~0xf;
1816     env->cpuid_version |= value & 0xf;
/* QOM getter for "vendor": returns a freshly-allocated NUL-terminated copy
 * of the 12-byte vendor string assembled from the three vendor words.
 * Caller owns the returned buffer. */
1819 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1821     X86CPU *cpu = X86_CPU(obj);
1822     CPUX86State *env = &cpu->env;
1825     value = g_malloc(CPUID_VENDOR_SZ + 1);
1826     x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1827                              env->cpuid_vendor3);
/* QOM setter for "vendor": requires exactly CPUID_VENDOR_SZ (12) bytes and
 * packs them little-endian, 4 bytes per vendor word. */
1831 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1834     X86CPU *cpu = X86_CPU(obj);
1835     CPUX86State *env = &cpu->env;
1838     if (strlen(value) != CPUID_VENDOR_SZ) {
1839         error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1843     env->cpuid_vendor1 = 0;
1844     env->cpuid_vendor2 = 0;
1845     env->cpuid_vendor3 = 0;
1846     for (i = 0; i < 4; i++) {
1847         env->cpuid_vendor1 |= ((uint8_t)value[i    ]) << (8 * i);
1848         env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1849         env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
/* QOM getter for "model-id": unpacks the 48-byte brand string from the
 * cpuid_model word array (4 chars per 32-bit word, little-endian).
 * Caller owns the returned buffer. */
1853 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1855     X86CPU *cpu = X86_CPU(obj);
1856     CPUX86State *env = &cpu->env;
1860     value = g_malloc(48 + 1);
1861     for (i = 0; i < 48; i++) {
1862         value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
/* QOM setter for "model-id": packs up to 48 characters into cpuid_model;
 * the array is zeroed first so shorter strings are NUL-padded. */
1868 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1871     X86CPU *cpu = X86_CPU(obj);
1872     CPUX86State *env = &cpu->env;
1875     if (model_id == NULL) {
1878     len = strlen(model_id);
1879     memset(env->cpuid_model, 0, 48);
1880     for (i = 0; i < 48; i++) {
1884             c = (uint8_t)model_id[i];
1886         env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
/* QOM getter for "tsc-frequency": reports env.tsc_khz in Hz. */
1890 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1891                                    void *opaque, Error **errp)
1893     X86CPU *cpu = X86_CPU(obj);
1896     value = cpu->env.tsc_khz * 1000;
1897     visit_type_int(v, name, &value, errp);
/* QOM setter for "tsc-frequency": accepts Hz, stores kHz in both tsc_khz
 * and user_tsc_khz (the latter records that the user set it explicitly). */
1900 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1901                                    void *opaque, Error **errp)
1903     X86CPU *cpu = X86_CPU(obj);
1904     const int64_t min = 0;
1905     const int64_t max = INT64_MAX;
1906     Error *local_err = NULL;
1909     visit_type_int(v, name, &value, &local_err);
1911         error_propagate(errp, local_err);
1914     if (value < min || value > max) {
1915         error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1916                    name ? name : "null", value, min, max);
/* Hz -> kHz; sub-kHz precision is dropped. */
1920     cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1923 /* Generic getter for "feature-words" and "filtered-features" properties */
/* opaque points at the uint32_t feature-word array to expose; the list and
 * its entries are stack-allocated, so the visitor must consume them before
 * this function returns. */
1924 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1925                                       const char *name, void *opaque,
1928     uint32_t *array = (uint32_t *)opaque;
1930     X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1931     X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1932     X86CPUFeatureWordInfoList *list = NULL;
1934     for (w = 0; w < FEATURE_WORDS; w++) {
1935         FeatureWordInfo *wi = &feature_word_info[w];
1936         X86CPUFeatureWordInfo *qwi = &word_infos[w];
1937         qwi->cpuid_input_eax = wi->cpuid_eax;
1938         qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1939         qwi->cpuid_input_ecx = wi->cpuid_ecx;
1940         qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1941         qwi->features = array[w];
1943         /* List will be in reverse order, but order shouldn't matter */
1944         list_entries[w].next = list;
1945         list_entries[w].value = &word_infos[w];
1946         list = &list_entries[w];
1949     visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
/* QOM getter for "hv-spinlocks": the Hyper-V spinlock retry count. */
1952 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1953                                  void *opaque, Error **errp)
1955     X86CPU *cpu = X86_CPU(obj);
1956     int64_t value = cpu->hyperv_spinlock_attempts;
1958     visit_type_int(v, name, &value, errp);
/* QOM setter for "hv-spinlocks": Hyper-V requires a minimum retry count of
 * 0xFFF, hence the unusual lower bound. */
1961 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1962                                  void *opaque, Error **errp)
1964     const int64_t min = 0xFFF;
1965     const int64_t max = UINT_MAX;
1966     X86CPU *cpu = X86_CPU(obj);
1970     visit_type_int(v, name, &value, &err);
1972         error_propagate(errp, err);
1976     if (value < min || value > max) {
1977         error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1978                    " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1979                    object_get_typename(obj), name ? name : "null",
1983     cpu->hyperv_spinlock_attempts = value;
/* PropertyInfo wiring the accessors above into qdev. */
1986 static PropertyInfo qdev_prop_spinlocks = {
1988     .get = x86_get_hv_spinlocks,
1989     .set = x86_set_hv_spinlocks,
1992 /* Convert all '_' in a feature string option name to '-', to make feature
1993  * name conform to QOM property naming rule, which uses '-' instead of '_'.
/* Mutates s in place. */
1995 static inline void feat2prop(char *s)
1997     while ((s = strchr(s, '_'))) {
2002 /* Compatibily hack to maintain legacy +-feat semantic,
2003  * where +-feat overwrites any feature set by
2004  * feat=on|feat even if the later is parsed after +-feat
2005  * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
/* Bitmaps accumulated from "+feat"/"-feat" options; applied after the
 * key=value properties so they always win. */
2007 static FeatureWordArray plus_features = { 0 };
2008 static FeatureWordArray minus_features = { 0 };
2010 /* Parse "+feature,-feature,feature=foo" CPU feature string
/* Registers each key=value as a global qdev property default for typename.
 * Runs only once per process (guarded by cpu_globals_initialized).
 * NOTE(review): "features" is mutated by strtok(). */
2012 static void x86_cpu_parse_featurestr(const char *typename, char *features,
2015     char *featurestr; /* Single 'key=value" string being parsed */
2016     Error *local_err = NULL;
2017     static bool cpu_globals_initialized;
2019     if (cpu_globals_initialized) {
2022     cpu_globals_initialized = true;
2028     for (featurestr = strtok(features, ",");
2029          featurestr && !local_err;
2030          featurestr = strtok(NULL, ",")) {
2032         const char *val = NULL;
2035         GlobalProperty *prop;
2037         /* Compatibility syntax: */
2038         if (featurestr[0] == '+') {
2039             add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
2041         } else if (featurestr[0] == '-') {
2042             add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
2046         eq = strchr(featurestr, '=');
2054         feat2prop(featurestr);
/* Legacy alias: "tsc-freq" takes a size-suffixed value (e.g. 2G) and is
 * converted to the numeric "tsc-frequency" property. */
2058         if (!strcmp(name, "tsc-freq")) {
2062             tsc_freq = qemu_strtosz_suffix_unit(val, &err,
2063                                            QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
2064             if (tsc_freq < 0 || *err) {
2065                 error_setg(errp, "bad numerical value %s", val);
2068             snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2070             name = "tsc-frequency";
2073         prop = g_new0(typeof(*prop), 1);
2074         prop->driver = typename;
2075         prop->property = g_strdup(name);
2076         prop->value = g_strdup(val);
2077         prop->errp = &error_fatal;
2078         qdev_prop_register_global(prop);
2082     error_propagate(errp, local_err);
2086 /* Print all cpuid feature names in featureset
/* Space-separated, skipping NULL (unnamed) entries. */
2088 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2093     for (bit = 0; bit < 32; bit++) {
2094         if (featureset[bit]) {
2095             print(f, "%s%s", first ? "" : " ", featureset[bit]);
2101 /* generate CPU information. */
/* Lists every built-in CPU model, the KVM-only "host" model, and all
 * recognized CPUID flag names. Used by "-cpu help". */
2102 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2104     X86CPUDefinition *def;
2108     for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2109         def = &builtin_x86_defs[i];
2110         snprintf(buf, sizeof(buf), "%s", def->name);
2111         (*cpu_fprintf)(f, "x86 %16s  %-48s\n", buf, def->model_id);
2114     (*cpu_fprintf)(f, "x86 %16s  %-48s\n", "host",
2115                    "KVM processor with all supported host features "
2116                    "(only available in KVM mode)");
2119     (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2120     for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2121         FeatureWordInfo *fw = &feature_word_info[i];
2123         (*cpu_fprintf)(f, "  ");
2124         listflags(f, cpu_fprintf, fw->feat_names);
2125         (*cpu_fprintf)(f, "\n");
/* QMP query-cpu-definitions: build a list of all built-in CPU model names.
 * Entries are prepended, so the returned list is in reverse table order.
 * Caller owns the returned list. */
2129 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2131     CpuDefinitionInfoList *cpu_list = NULL;
2132     X86CPUDefinition *def;
2135     for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2136         CpuDefinitionInfoList *entry;
2137         CpuDefinitionInfo *info;
2139         def = &builtin_x86_defs[i];
2140         info = g_malloc0(sizeof(*info));
2141         info->name = g_strdup(def->name);
2143         entry = g_malloc0(sizeof(*entry));
2144         entry->value = info;
2145         entry->next = cpu_list;
/* Return the bits of feature word w the current accelerator can provide:
 * KVM's supported-CPUID under KVM, the static tcg_features table under TCG.
 * With migratable_only, non-migratable bits are masked out. */
2152 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2153                                                    bool migratable_only)
2155     FeatureWordInfo *wi = &feature_word_info[w];
2158     if (kvm_enabled()) {
2159         r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2162     } else if (tcg_enabled()) {
2163         r = wi->tcg_features;
2167     if (migratable_only) {
2168         r &= x86_cpu_get_migratable_flags(w);
2174  * Filters CPU feature words based on host availability of each feature.
2176  * Returns: 0 if all flags are supported by the host, non-zero otherwise.
/* Clears unsupported bits from env->features[] and records them in
 * cpu->filtered_features[]; warns about them if check/enforce is set. */
2178 static int x86_cpu_filter_features(X86CPU *cpu)
2180     CPUX86State *env = &cpu->env;
2184     for (w = 0; w < FEATURE_WORDS; w++) {
2185         uint32_t host_feat =
2186             x86_cpu_get_supported_feature_word(w, cpu->migratable);
2187         uint32_t requested_features = env->features[w];
2188         env->features[w] &= host_feat;
2189         cpu->filtered_features[w] = requested_features & ~env->features[w];
2190         if (cpu->filtered_features[w]) {
2191             if (cpu->check_cpuid || cpu->enforce_cpuid) {
2192                 report_unavailable_features(w, cpu->filtered_features[w]);
/* Apply every entry of a NULL-terminated PropValue table to cpu as QOM
 * property assignments (values parsed as strings). */
2201 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2204     for (pv = props; pv->prop; pv++) {
2208         object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2213 /* Load data from X86CPUDefinition
/* Copies model definition fields into cpu via QOM properties, then applies
 * accelerator-specific adjustments (KVM defaults, CPUID_EXT_HYPERVISOR,
 * host vendor override under KVM). */
2215 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2217     CPUX86State *env = &cpu->env;
2219     char host_vendor[CPUID_VENDOR_SZ + 1];
2222     /* CPU models only set _minimum_ values for level/xlevel: */
2223     object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
2224     object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2226     object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2227     object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2228     object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2229     object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2230     for (w = 0; w < FEATURE_WORDS; w++) {
2231         env->features[w] = def->features[w];
2234     /* Special cases not set in the X86CPUDefinition structs: */
2235     if (kvm_enabled()) {
/* x2apic needs the in-kernel irqchip; disable it otherwise. */
2236         if (!kvm_irqchip_in_kernel()) {
2237             x86_cpu_change_kvm_default("x2apic", "off");
2240         x86_cpu_apply_props(cpu, kvm_default_props);
/* Always advertise that we are running under a hypervisor. */
2243     env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2245     /* sysenter isn't supported in compatibility mode on AMD,
2246      * syscall isn't supported in compatibility mode on Intel.
2247      * Normally we advertise the actual CPU vendor, but you can
2248      * override this using the 'vendor' property if you want to use
2249      * KVM's sysenter/syscall emulation in compatibility mode and
2250      * when doing cross vendor migration
2252     vendor = def->vendor;
2253     if (kvm_enabled()) {
2254         uint32_t  ebx = 0, ecx = 0, edx = 0;
2255         host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2256         x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2257         vendor = host_vendor;
2260     object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
/* Create an X86CPU from a "-cpu"-style model string. */
2264 X86CPU *cpu_x86_init(const char *cpu_model)
2266     return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
/* class_init shared by all built-in model types: data is the model's
 * X86CPUDefinition, stashed in the class for later loading. */
2269 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2271     X86CPUDefinition *cpudef = data;
2272     X86CPUClass *xcc = X86_CPU_CLASS(oc);
2274     xcc->cpu_def = cpudef;
/* Register one QOM type per built-in CPU model definition. */
2277 static void x86_register_cpudef_type(X86CPUDefinition *def)
2279     char *typename = x86_cpu_type_name(def->name);
2282         .parent = TYPE_X86_CPU,
2283         .class_init = x86_cpu_cpudef_class_init,
2291 #if !defined(CONFIG_USER_ONLY)
/* Strip the APIC bit from CPUID[1].EDX (used when no APIC is present). */
2293 void cpu_clear_apic_feature(CPUX86State *env)
2295     env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2298 #endif /* !CONFIG_USER_ONLY */
/* Emulate the CPUID instruction for @index/@count (EAX/ECX inputs),
 * storing the four result registers through @eax/@ebx/@ecx/@edx.
 *
 * The leaf space is split into the standard range (0x0000_xxxx, capped
 * at cpuid_level), the extended range (0x8000_xxxx, capped at
 * cpuid_xlevel) and the Centaur/VIA range (0xC000_xxxx, capped at
 * cpuid_xlevel2).
 * NOTE(review): the top-level switch's `case` labels are not visible in
 * this chunk; the leaf each region handles is inferred from the original
 * inline comments.
 */
2300 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2301                    uint32_t *eax, uint32_t *ebx,
2302                    uint32_t *ecx, uint32_t *edx)
2304     X86CPU *cpu = x86_env_get_cpu(env);
2305     CPUState *cs = CPU(cpu);
2306     uint32_t pkg_offset;
2308     /* test if maximum index reached */
2309     if (index & 0x80000000) {
2310         if (index > env->cpuid_xlevel) {
2311             if (env->cpuid_xlevel2 > 0) {
2312                 /* Handle the Centaur's CPUID instruction. */
2313                 if (index > env->cpuid_xlevel2) {
2314                     index = env->cpuid_xlevel2;
2315                 } else if (index < 0xC0000000) {
2316                     index = env->cpuid_xlevel;
2319                 /* Intel documentation states that invalid EAX input will
2320                  * return the same information as EAX=cpuid_level
2321                  * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2323                 index = env->cpuid_level;
2327         if (index > env->cpuid_level)
2328             index = env->cpuid_level;
/* Leaf 0: max standard level + vendor string (GenuineIntel etc.). */
2333         *eax = env->cpuid_level;
2334         *ebx = env->cpuid_vendor1;
2335         *edx = env->cpuid_vendor2;
2336         *ecx = env->cpuid_vendor3;
/* Leaf 1: version/feature information. */
2339         *eax = env->cpuid_version;
2340         *ebx = (cpu->apic_id << 24) |
2341                8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2342         *ecx = env->features[FEAT_1_ECX];
2343         if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2344             *ecx |= CPUID_EXT_OSXSAVE;
2346         *edx = env->features[FEAT_1_EDX];
2347         if (cs->nr_cores * cs->nr_threads > 1) {
2348             *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
/* Leaf 2: legacy cache descriptors. */
2353         /* cache info: needed for Pentium Pro compatibility */
2354         if (cpu->cache_info_passthrough) {
2355             host_cpuid(index, 0, eax, ebx, ecx, edx);
2358         *eax = 1; /* Number of CPUID[EAX=2] calls required */
2360         if (!cpu->enable_l3_cache) {
2363             *ecx = L3_N_DESCRIPTOR;
2365         *edx = (L1D_DESCRIPTOR << 16) | \
2366                (L1I_DESCRIPTOR <<  8) | \
/* Leaf 4: deterministic cache parameters, sub-leaf per cache level. */
2370         /* cache info: needed for Core compatibility */
2371         if (cpu->cache_info_passthrough) {
2372             host_cpuid(index, count, eax, ebx, ecx, edx);
2373             *eax &= ~0xFC000000;
2377             case 0: /* L1 dcache info */
2378                 *eax |= CPUID_4_TYPE_DCACHE | \
2379                         CPUID_4_LEVEL(1) | \
2380                         CPUID_4_SELF_INIT_LEVEL;
2381                 *ebx = (L1D_LINE_SIZE - 1) | \
2382                        ((L1D_PARTITIONS - 1) << 12) | \
2383                        ((L1D_ASSOCIATIVITY - 1) << 22);
2384                 *ecx = L1D_SETS - 1;
2385                 *edx = CPUID_4_NO_INVD_SHARING;
2387             case 1: /* L1 icache info */
2388                 *eax |= CPUID_4_TYPE_ICACHE | \
2389                         CPUID_4_LEVEL(1) | \
2390                         CPUID_4_SELF_INIT_LEVEL;
2391                 *ebx = (L1I_LINE_SIZE - 1) | \
2392                        ((L1I_PARTITIONS - 1) << 12) | \
2393                        ((L1I_ASSOCIATIVITY - 1) << 22);
2394                 *ecx = L1I_SETS - 1;
2395                 *edx = CPUID_4_NO_INVD_SHARING;
2397             case 2: /* L2 cache info */
2398                 *eax |= CPUID_4_TYPE_UNIFIED | \
2399                         CPUID_4_LEVEL(2) | \
2400                         CPUID_4_SELF_INIT_LEVEL;
2401                 if (cs->nr_threads > 1) {
2402                     *eax |= (cs->nr_threads - 1) << 14;
2404                 *ebx = (L2_LINE_SIZE - 1) | \
2405                        ((L2_PARTITIONS - 1) << 12) | \
2406                        ((L2_ASSOCIATIVITY - 1) << 22);
2408                 *edx = CPUID_4_NO_INVD_SHARING;
2410             case 3: /* L3 cache info */
2411                 if (!cpu->enable_l3_cache) {
2418                 *eax |= CPUID_4_TYPE_UNIFIED | \
2419                         CPUID_4_LEVEL(3) | \
2420                         CPUID_4_SELF_INIT_LEVEL;
                /* L3 is shared by the whole package: bits 25:14 hold the
                 * number of sharing APIC IDs minus one. */
2421                 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2422                 *eax |= ((1 << pkg_offset) - 1) << 14;
2423                 *ebx = (L3_N_LINE_SIZE - 1) | \
2424                        ((L3_N_PARTITIONS - 1) << 12) | \
2425                        ((L3_N_ASSOCIATIVITY - 1) << 22);
2426                 *ecx = L3_N_SETS - 1;
2427                 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
2429             default: /* end of info */
2438         /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2439         if ((*eax & 31) && cs->nr_cores > 1) {
2440             *eax |= (cs->nr_cores - 1) << 26;
/* Leaf 5: MONITOR/MWAIT parameters. */
2444         /* mwait info: needed for Core compatibility */
2445         *eax = 0; /* Smallest monitor-line size in bytes */
2446         *ebx = 0; /* Largest monitor-line size in bytes */
2447         *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
/* Leaf 6: thermal/power management. */
2451         /* Thermal and Power Leaf */
2452         *eax = env->features[FEAT_6_EAX];
/* Leaf 7: structured extended features (sub-leaf 0 only). */
2458         /* Structured Extended Feature Flags Enumeration Leaf */
2460             *eax = 0; /* Maximum ECX value for sub-leaves */
2461             *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2462             *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2463             if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2464                 *ecx |= CPUID_7_0_ECX_OSPKE;
2466             *edx = 0; /* Reserved */
/* Leaf 9: direct cache access. */
2475         /* Direct Cache Access Information Leaf */
2476         *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
/* Leaf 0xA: PMU; passed through from KVM only when a PMU is enabled. */
2482         /* Architectural Performance Monitoring Leaf */
2483         if (kvm_enabled() && cpu->enable_pmu) {
2484             KVMState *s = cs->kvm_state;
2486             *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2487             *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2488             *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2489             *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
/* Leaf 0xB: extended topology (SMT / core levels per sub-leaf). */
2498         /* Extended Topology Enumeration Leaf */
2499         if (!cpu->enable_cpuid_0xb) {
2500                 *eax = *ebx = *ecx = *edx = 0;
2504         *ecx = count & 0xff;
2505         *edx = cpu->apic_id;
2509             *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
2510             *ebx = cs->nr_threads;
2511             *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2514             *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2515             *ebx = cs->nr_cores * cs->nr_threads;
2516             *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2521             *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2524         assert(!(*eax & ~0x1f));
2525         *ebx &= 0xffff; /* The count doesn't need to be reliable. */
/* Leaf 0xD: XSAVE state enumeration; sub-leaf 0 is the main leaf,
 * sub-leaf 1 the XSAVE feature flags, 2+ per-component sizes. */
2528         /* Processor Extended State */
2533         if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2538             *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
2539             *eax = env->features[FEAT_XSAVE_COMP_LO];
2540             *edx = env->features[FEAT_XSAVE_COMP_HI];
2542         } else if (count == 1) {
2543             *eax = env->features[FEAT_XSAVE];
2544         } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2545             if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
2546                 const ExtSaveArea *esa = &x86_ext_save_areas[count];
/* Leaf 0x80000000: max extended level + vendor string. */
2554         *eax = env->cpuid_xlevel;
2555         *ebx = env->cpuid_vendor1;
2556         *edx = env->cpuid_vendor2;
2557         *ecx = env->cpuid_vendor3;
/* Leaf 0x80000001: extended version/feature bits. */
2560         *eax = env->cpuid_version;
2562         *ecx = env->features[FEAT_8000_0001_ECX];
2563         *edx = env->features[FEAT_8000_0001_EDX];
2565         /* The Linux kernel checks for the CMPLegacy bit and
2566          * discards multiple thread information if it is set.
2567          * So don't set it here for Intel to make Linux guests happy.
2569         if (cs->nr_cores * cs->nr_threads > 1) {
2570             if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2571                 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2572                 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2573                 *ecx |= 1 << 1;    /* CmpLegacy bit */
/* Leaves 0x80000002..4: processor brand string, 16 bytes per leaf. */
2580         *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2581         *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2582         *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2583         *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
/* Leaf 0x80000005: AMD-style L1 cache and TLB info. */
2586         /* cache info (L1 cache) */
2587         if (cpu->cache_info_passthrough) {
2588             host_cpuid(index, 0, eax, ebx, ecx, edx);
2591         *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2592                (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
2593         *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2594                (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
2595         *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2596                (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2597         *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2598                (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
/* Leaf 0x80000006: AMD-style L2/L3 cache and TLB info. */
2601         /* cache info (L2 cache) */
2602         if (cpu->cache_info_passthrough) {
2603             host_cpuid(index, 0, eax, ebx, ecx, edx);
2606         *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2607                (L2_DTLB_2M_ENTRIES << 16) | \
2608                (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2609                (L2_ITLB_2M_ENTRIES);
2610         *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2611                (L2_DTLB_4K_ENTRIES << 16) | \
2612                (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2613                (L2_ITLB_4K_ENTRIES);
2614         *ecx = (L2_SIZE_KB_AMD << 16) | \
2615                (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2616                (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2617         if (!cpu->enable_l3_cache) {
2618             *edx = ((L3_SIZE_KB / 512) << 18) | \
2619                    (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2620                    (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2622             *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
2623                    (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
2624                    (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
/* Leaf 0x80000007: advanced power management. */
2631         *edx = env->features[FEAT_8000_0007_EDX];
/* Leaf 0x80000008: physical/virtual address sizes + core count. */
2634         /* virtual & phys address size in low 2 bytes. */
2635         if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2636             /* 64 bit processor, 48 bits virtual, configurable
2639             *eax = 0x00003000 + cpu->phys_bits;
2641             *eax = cpu->phys_bits;
2646         if (cs->nr_cores * cs->nr_threads > 1) {
2647             *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
/* Leaf 0x8000000A: SVM revision and feature bits. */
2651         if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2652             *eax = 0x00000001; /* SVM Revision */
2653             *ebx = 0x00000010; /* nr of ASIDs */
2655             *edx = env->features[FEAT_SVM]; /* optional features */
/* Leaf 0xC0000000: max Centaur/VIA extended level. */
2664         *eax = env->cpuid_xlevel2;
/* Leaf 0xC0000001: VIA feature bits. */
2670         /* Support for VIA CPU's CPUID instruction */
2671         *eax = env->cpuid_version;
2674         *edx = env->features[FEAT_C000_0001_EDX];
2679         /* Reserved for the future, and now filled with zero */
2686         /* reserved values: zero */
2695 /* CPUClass::reset() */
/* Bring the vCPU to its architectural reset state: real mode, segment
 * caches loaded with 16-bit defaults, FPU/MXCSR defaults, DR6/DR7 fixed
 * bits, xcr0/cr4 recomputed, MTRRs cleared; for system emulation, also
 * (re)designate the BSP and tell KVM to reset the in-kernel vcpu.
 * NOTE(review): several declarations (i, xcr0, cr4) and closing braces
 * are not visible in this chunk.
 */
2696 static void x86_cpu_reset(CPUState *s)
2698     X86CPU *cpu = X86_CPU(s);
2699     X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2700     CPUX86State *env = &cpu->env;
2705     xcc->parent_reset(s);
     /* Zero everything up to (but excluding) the fields that must
      * survive reset. */
2707     memset(env, 0, offsetof(CPUX86State, end_reset_fields));
2711     env->old_exception = -1;
2713     /* init to reset state */
2715     env->hflags2 |= HF2_GIF_MASK;
2717     cpu_x86_update_cr0(env, 0x60000010);
2718     env->a20_mask = ~0x0;
2719     env->smbase = 0x30000;
2721     env->idt.limit = 0xffff;
2722     env->gdt.limit = 0xffff;
2723     env->ldt.limit = 0xffff;
2724     env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2725     env->tr.limit = 0xffff;
2726     env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
     /* CS starts at f000:fff0 (reset vector alias below 4G). */
2728     cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2729                            DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2730                            DESC_R_MASK | DESC_A_MASK);
2731     cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2732                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2734     cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2735                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2737     cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2738                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2740     cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2741                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2743     cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2744                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2748     env->regs[R_EDX] = env->cpuid_version;
2753     for (i = 0; i < 8; i++) {
2756     cpu_set_fpuc(env, 0x37f);
2758     env->mxcsr = 0x1f80;
2759     /* All units are in INIT state.  */
2762     env->pat = 0x0007040600070406ULL;
2763     env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2765     memset(env->dr, 0, sizeof(env->dr));
2766     env->dr[6] = DR6_FIXED_1;
2767     env->dr[7] = DR7_FIXED_1;
2768     cpu_breakpoint_remove_all(s, BP_CPU);
2769     cpu_watchpoint_remove_all(s, BP_CPU);
2772     xcr0 = XSTATE_FP_MASK;
2774 #ifdef CONFIG_USER_ONLY
2775     /* Enable all the features for user-mode. */
2776     if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2777         xcr0 |= XSTATE_SSE_MASK;
2779     for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2780         const ExtSaveArea *esa = &x86_ext_save_areas[i];
2781         if (env->features[esa->feature] & esa->bits) {
2786     if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2787         cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2789     if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2790         cr4 |= CR4_FSGSBASE_MASK;
2795     cpu_x86_update_cr4(env, cr4);
2798      * SDM 11.11.5 requires:
2799      *  - IA32_MTRR_DEF_TYPE MSR.E = 0
2800      *  - IA32_MTRR_PHYSMASKn.V = 0
2801      * All other bits are undefined.  For simplification, zero it all.
2803     env->mtrr_deftype = 0;
2804     memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2805     memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2807 #if !defined(CONFIG_USER_ONLY)
2808     /* We hard-wire the BSP to the first CPU. */
2809     apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
     /* APs stay halted until they receive INIT/SIPI. */
2811     s->halted = !cpu_is_bsp(cpu);
2813     if (kvm_enabled()) {
2814         kvm_arch_reset_vcpu(cpu);
2819 #ifndef CONFIG_USER_ONLY
/* Return true if this vCPU is the bootstrap processor, as reported by
 * the BSP bit of its APIC base MSR. */
2820 bool cpu_is_bsp(X86CPU *cpu)
2822     return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2825 /* TODO: remove me, when reset over QOM tree is implemented */
/* qemu_register_reset() callback: reset the CPU on machine reset. */
2826 static void x86_cpu_machine_reset_cb(void *opaque)
2828     X86CPU *cpu = opaque;
2829     cpu_reset(CPU(cpu));
/* Initialize machine-check state: on family >= 6 CPUs that advertise
 * both MCE and MCA, set mcg_cap (optionally with LMCE), enable all
 * controls in mcg_ctl and each bank's MCi_CTL. */
2833 static void mce_init(X86CPU *cpu)
2835     CPUX86State *cenv = &cpu->env;
2838     if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2839         && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2840             (CPUID_MCE | CPUID_MCA)) {
2841         cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
2842                         (cpu->enable_lmce ? MCG_LMCE_P : 0);
2843         cenv->mcg_ctl = ~(uint64_t)0;
2844         for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            /* Banks are laid out as 4 MSRs each; index 0 is MCi_CTL. */
2845             cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2850 #ifndef CONFIG_USER_ONLY
/* Create the local APIC device for this CPU, picking the in-kernel KVM
 * or Xen flavour when applicable, and parent it under the CPU object. */
2851 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2853     APICCommonState *apic;
2854     const char *apic_type = "apic";
2856     if (kvm_apic_in_kernel()) {
2857         apic_type = "kvm-apic";
2858     } else if (xen_enabled()) {
2859         apic_type = "xen-apic";
2862     cpu->apic_state = DEVICE(object_new(apic_type));
2864     object_property_add_child(OBJECT(cpu), "lapic",
2865                               OBJECT(cpu->apic_state), &error_abort);
     /* The child property now holds the reference. */
2866     object_unref(OBJECT(cpu->apic_state));
2868     qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2869     /* TODO: convert to link<> */
2870     apic = APIC_COMMON(cpu->apic_state);
2872     apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
/* Realize the CPU's APIC (if any) and, exactly once per machine, map
 * the APIC MMIO region into system memory at the default base. */
2875 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2877     APICCommonState *apic;
     /* Shared across CPUs: the MMIO window is mapped only once. */
2878     static bool apic_mmio_map_once;
2880     if (cpu->apic_state == NULL) {
2883     object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2886     /* Map APIC MMIO area */
2887     apic = APIC_COMMON(cpu->apic_state);
2888     if (!apic_mmio_map_once) {
2889         memory_region_add_subregion_overlap(get_system_memory(),
2891                                             MSR_IA32_APICBASE_BASE,
2894         apic_mmio_map_once = true;
/* machine-init-done notifier: if the machine provides /machine/smram,
 * alias it (disabled by default) into this CPU's address space root so
 * SMM code can enable it with higher priority than normal RAM. */
2898 static void x86_cpu_machine_done(Notifier *n, void *unused)
2900     X86CPU *cpu = container_of(n, X86CPU, machine_done);
2901     MemoryRegion *smram =
2902         (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2905         cpu->smram = g_new(MemoryRegion, 1);
2906         memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2907                                  smram, 0, 1ull << 32);
2908         memory_region_set_enabled(cpu->smram, false);
2909         memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
/* User-mode build: no APIC device is modelled, so realize is a no-op. */
2913 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2918 /* Note: Only safe for use on x86(-64) hosts */
/* Query the host's physical address width via CPUID leaf 0x80000008;
 * fall back to 36 bits on machines lacking that leaf. */
2919 static uint32_t x86_host_phys_bits(void)
2922     uint32_t host_phys_bits;
2924     host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
2925     if (eax >= 0x80000008) {
2926         host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
2927         /* Note: According to AMD doc 25481 rev 2.34 they have a field
2928          * at 23:16 that can specify a maximum physical address bits for
2929          * the guest that can override this value; but I've not seen
2930          * anything with that set.
2932         host_phys_bits = eax & 0xff;
2934         /* It's an odd 64 bit machine that doesn't have the leaf for
2935          * physical address bits; fall back to 36 that's most older
2938         host_phys_bits = 36;
2941     return host_phys_bits;
/* Raise *min to @value if it is currently smaller — used to bump
 * cpuid_min_{level,xlevel,xlevel2} (body not visible in this chunk). */
2944 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
2951 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
/* If feature word @w has any bit set, raise the matching minimum CPUID
 * level (standard / extended / Centaur, selected by the leaf's top
 * nibble) so the leaf that carries it is actually enumerated. */
2952 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
2954     CPUX86State *env = &cpu->env;
2955     FeatureWordInfo *fi = &feature_word_info[w];
2956     uint32_t eax = fi->cpuid_eax;
     /* 0x00000000 = standard, 0x80000000 = extended, 0xC0000000 = VIA. */
2957     uint32_t region = eax & 0xF0000000;
2959     if (!env->features[w]) {
2965         x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
2968         x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
2971         x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
2976 /* Calculate XSAVE components based on the configured CPU feature flags */
/* Build the XSAVE component bitmap (FP | SSE | every extended save area
 * whose guarding feature bit is enabled) and store it in the
 * FEAT_XSAVE_COMP_{LO,HI} feature words. No-op without CPUID.XSAVE. */
2977 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
2979     CPUX86State *env = &cpu->env;
2983     if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2987     mask = (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2988     for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2989         const ExtSaveArea *esa = &x86_ext_save_areas[i];
2990         if (env->features[esa->feature] & esa->bits) {
2991             mask |= (1ULL << i);
2995     env->features[FEAT_XSAVE_COMP_LO] = mask;
2996     env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
/* Vendor checks against the three CPUID[0] vendor-string registers. */
2999 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
3000                            (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
3001                            (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
3002 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
3003                          (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
3004                          (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for X86CPU.
 * Resolves the final feature set (host features, +/- overrides, XSAVE
 * components), auto-adjusts CPUID levels, filters unsupported features,
 * determines phys_bits, then creates the vCPU, APIC and (for TCG
 * system emulation) the per-CPU address space with SMRAM support.
 * NOTE(review): several declarations and closing braces are not visible
 * in this chunk.
 */
3005 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
3007     CPUState *cs = CPU(dev);
3008     X86CPU *cpu = X86_CPU(dev);
3009     X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3010     CPUX86State *env = &cpu->env;
3011     Error *local_err = NULL;
3012     static bool ht_warned;
3015     if (xcc->kvm_required && !kvm_enabled()) {
3016         char *name = x86_cpu_class_get_model_name(xcc);
3017         error_setg(&local_err, "CPU model '%s' requires KVM", name);
3022     if (cpu->apic_id == UNASSIGNED_APIC_ID) {
3023         error_setg(errp, "apic-id property was not initialized properly");
3027     /*TODO: cpu->host_features incorrectly overwrites features
3028      * set using "feat=on|off". Once we fix this, we can convert
3029      * plus_features & minus_features to global properties
3030      * inside x86_cpu_parse_featurestr() too.
3032     if (cpu->host_features) {
3033         for (w = 0; w < FEATURE_WORDS; w++) {
3035                 x86_cpu_get_supported_feature_word(w, cpu->migratable);
3039     for (w = 0; w < FEATURE_WORDS; w++) {
3040         cpu->env.features[w] |= plus_features[w];
3041         cpu->env.features[w] &= ~minus_features[w];
     /* KVM paravirt features are meaningless without KVM (or if hidden). */
3044     if (!kvm_enabled() || !cpu->expose_kvm) {
3045         env->features[FEAT_KVM] = 0;
3048     x86_cpu_enable_xsave_components(cpu);
3050     /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
3051     x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
3052     if (cpu->full_cpuid_auto_level) {
3053         x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
3054         x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
3055         x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
3056         x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
3057         x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
3058         x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
3059         x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
3060         x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
3061         x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
3062         x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
3063         /* SVM requires CPUID[0x8000000A] */
3064         if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3065             x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
3069     /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
3070     if (env->cpuid_level == UINT32_MAX) {
3071         env->cpuid_level = env->cpuid_min_level;
3073     if (env->cpuid_xlevel == UINT32_MAX) {
3074         env->cpuid_xlevel = env->cpuid_min_xlevel;
3076     if (env->cpuid_xlevel2 == UINT32_MAX) {
3077         env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
     /* With "enforce", refusing to start beats silently dropping bits. */
3080     if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
3081         error_setg(&local_err,
3083                    "Host doesn't support requested features" :
3084                    "TCG doesn't support requested features");
3088     /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
3091     if (IS_AMD_CPU(env)) {
3092         env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
3093         env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
3094            & CPUID_EXT2_AMD_ALIASES);
3097     /* For 64bit systems think about the number of physical bits to present.
3098      * ideally this should be the same as the host; anything other than matching
3099      * the host can cause incorrect guest behaviour.
3100      * QEMU used to pick the magic value of 40 bits that corresponds to
3101      * consumer AMD devices but nothing else.
3103     if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3104         if (kvm_enabled()) {
3105             uint32_t host_phys_bits = x86_host_phys_bits();
3108             if (cpu->host_phys_bits) {
3109                 /* The user asked for us to use the host physical bits */
3110                 cpu->phys_bits = host_phys_bits;
3113             /* Print a warning if the user set it to a value that's not the
3116             if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
3118                 error_report("Warning: Host physical bits (%u)"
3119                                  " does not match phys-bits property (%u)",
3120                                  host_phys_bits, cpu->phys_bits);
3124             if (cpu->phys_bits &&
3125                 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
3126                 cpu->phys_bits < 32)) {
3127                 error_setg(errp, "phys-bits should be between 32 and %u "
3129                                  TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
3133             if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
3134                 error_setg(errp, "TCG only supports phys-bits=%u",
3135                                   TCG_PHYS_ADDR_BITS);
3139         /* 0 means it was not explicitly set by the user (or by machine
3140          * compat_props or by the host code above). In this case, the default
3141          * is the value used by TCG (40).
3143         if (cpu->phys_bits == 0) {
3144             cpu->phys_bits = TCG_PHYS_ADDR_BITS;
3147         /* For 32 bit systems don't use the user set value, but keep
3148          * phys_bits consistent with what we tell the guest.
3150         if (cpu->phys_bits != 0) {
3151             error_setg(errp, "phys-bits is not user-configurable in 32 bit");
3155         if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
3156             cpu->phys_bits = 36;
3158             cpu->phys_bits = 32;
3161     cpu_exec_init(cs, &error_abort);
3163     if (tcg_enabled()) {
3167 #ifndef CONFIG_USER_ONLY
3168     qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
3170     if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
3171         x86_cpu_apic_create(cpu, &local_err);
3172         if (local_err != NULL) {
3180 #ifndef CONFIG_USER_ONLY
3181     if (tcg_enabled()) {
3182         AddressSpace *newas = g_new(AddressSpace, 1);
3184         cpu->cpu_as_mem = g_new(MemoryRegion, 1);
3185         cpu->cpu_as_root = g_new(MemoryRegion, 1);
3187         /* Outer container... */
3188         memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
3189         memory_region_set_enabled(cpu->cpu_as_root, true);
3191         /* ... with two regions inside: normal system memory with low
3194         memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
3195                                  get_system_memory(), 0, ~0ull);
3196         memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
3197         memory_region_set_enabled(cpu->cpu_as_mem, true);
3198         address_space_init(newas, cpu->cpu_as_root, "CPU");
3200         cpu_address_space_init(cs, newas, 0);
3202         /* ... SMRAM with higher priority, linked from /machine/smram.  */
3203         cpu->machine_done.notify = x86_cpu_machine_done;
3204         qemu_add_machine_init_done_notifier(&cpu->machine_done);
3210     /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3211      * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3212      * based on inputs (sockets,cores,threads), it is still better to gives
3215      * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
3216      * cs->nr_threads hasn't be populated yet and the checking is incorrect.
3218     if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3219         error_report("AMD CPU doesn't support hyperthreading. Please configure"
3220                      " -smp options properly.");
3224     x86_cpu_apic_realize(cpu, &local_err);
3225     if (local_err != NULL) {
3230     xcc->parent_realize(dev, &local_err);
3233     if (local_err != NULL) {
3234         error_propagate(errp, local_err);
/* DeviceClass::unrealize: stop and remove the vCPU, unregister the
 * machine-reset callback, and unparent/clear the APIC device. */
3239 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3241     X86CPU *cpu = X86_CPU(dev);
3243 #ifndef CONFIG_USER_ONLY
3244     cpu_remove_sync(CPU(dev));
3245     qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3248     if (cpu->apic_state) {
3249         object_unparent(OBJECT(cpu->apic_state));
3250         cpu->apic_state = NULL;
/* Glue for a bool QOM property backed by bit(s) of a uint32_t field;
 * accessors below use members ->ptr (the field) and ->mask (the bits). */
3254 typedef struct BitProperty {
/* QOM getter: the property reads true only when ALL bits of the mask
 * are set in the backing field. */
3259 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3260                                  void *opaque, Error **errp)
3262     BitProperty *fp = opaque;
3263     bool value = (*fp->ptr & fp->mask) == fp->mask;
3264     visit_type_bool(v, name, &value, errp);
/* QOM setter: set or clear the mask bits in the backing field.
 * Rejected after the device is realized. */
3267 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3268                                  void *opaque, Error **errp)
3270     DeviceState *dev = DEVICE(obj);
3271     BitProperty *fp = opaque;
3272     Error *local_err = NULL;
3275     if (dev->realized) {
3276         qdev_prop_set_after_realize(dev, name, errp);
3280     visit_type_bool(v, name, &value, &local_err);
3282         error_propagate(errp, local_err);
3287         *fp->ptr |= fp->mask;
3289         *fp->ptr &= ~fp->mask;
/* QOM release hook: free the BitProperty allocated at registration. */
3293 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3296     BitProperty *prop = opaque;
3300 /* Register a boolean property to get/set a single bit in a uint32_t field.
3302  * The same property name can be registered multiple times to make it affect
3303  * multiple bits in the same FeatureWord. In that case, the getter will return
3304  * true only if all bits are set.
3306 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3307                                       const char *prop_name,
3313     uint32_t mask = (1UL << bitnr);
3315     op = object_property_find(OBJECT(cpu), prop_name, NULL);
     /* Already registered: must point at the same field; just add
      * this bit to the existing mask. */
3318         assert(fp->ptr == field);
3321         fp = g_new0(BitProperty, 1);
3324         object_property_add(OBJECT(cpu), prop_name, "bool",
3325                             x86_cpu_get_bit_prop,
3326                             x86_cpu_set_bit_prop,
3327                             x86_cpu_release_bit_prop, fp, &error_abort);
/* Register the QOM bool property(ies) for one feature bit: the first
 * "|"-separated name in feat_names becomes the real property, any
 * further names become aliases to it. */
3331 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3335     Object *obj = OBJECT(cpu);
3338     FeatureWordInfo *fi = &feature_word_info[w];
3340     if (!fi->feat_names[bitnr]) {
3344     names = g_strsplit(fi->feat_names[bitnr], "|", 0);
     /* feat2prop() canonicalizes the feature name into a property name. */
3346     feat2prop(names[0]);
3347     x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3349     for (i = 1; names[i]; i++) {
3350         feat2prop(names[i]);
3351         object_property_add_alias(obj, names[i], obj, names[0],
/* QOM instance_init: register all per-CPU properties (version fields,
 * vendor, model-id, tsc-frequency, feature-word introspection and one
 * bool property per feature bit), then load the class's CPU model
 * definition as defaults. */
3358 static void x86_cpu_initfn(Object *obj)
3360     CPUState *cs = CPU(obj);
3361     X86CPU *cpu = X86_CPU(obj);
3362     X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3363     CPUX86State *env = &cpu->env;
3368     object_property_add(obj, "family", "int",
3369                         x86_cpuid_version_get_family,
3370                         x86_cpuid_version_set_family, NULL, NULL, NULL);
3371     object_property_add(obj, "model", "int",
3372                         x86_cpuid_version_get_model,
3373                         x86_cpuid_version_set_model, NULL, NULL, NULL);
3374     object_property_add(obj, "stepping", "int",
3375                         x86_cpuid_version_get_stepping,
3376                         x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3377     object_property_add_str(obj, "vendor",
3378                             x86_cpuid_get_vendor,
3379                             x86_cpuid_set_vendor, NULL);
3380     object_property_add_str(obj, "model-id",
3381                             x86_cpuid_get_model_id,
3382                             x86_cpuid_set_model_id, NULL);
3383     object_property_add(obj, "tsc-frequency", "int",
3384                         x86_cpuid_get_tsc_freq,
3385                         x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3386     object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3387                         x86_cpu_get_feature_words,
3388                         NULL, NULL, (void *)env->features, NULL);
3389     object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3390                         x86_cpu_get_feature_words,
3391                         NULL, NULL, (void *)cpu->filtered_features, NULL);
3393     cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
     /* One bool property per bit of every feature word. */
3395     for (w = 0; w < FEATURE_WORDS; w++) {
3398         for (bitnr = 0; bitnr < 32; bitnr++) {
3399             x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3403     x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
/* CPUClass::get_arch_id: the architectural CPU ID is the APIC ID. */
3406 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3408     X86CPU *cpu = X86_CPU(cs);
3410     return cpu->apic_id;
/* CPUClass::get_paging_enabled: paging is on when CR0.PG is set. */
3413 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3415     X86CPU *cpu = X86_CPU(cs);
3417     return cpu->env.cr[0] & CR0_PG_MASK;
/* CPUClass::set_pc: point execution at @value by loading EIP. */
3420 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3422     X86CPU *cpu = X86_CPU(cs);
3424     cpu->env.eip = value;
/* CPUClass::synchronize_from_tb: recover EIP from the TB's pc, which is
 * stored as cs_base + eip. */
3427 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3429     X86CPU *cpu = X86_CPU(cs);
3431     cpu->env.eip = tb->pc - tb->cs_base;
/* CPUClass::has_work: true when an interrupt is deliverable — a
 * maskable INTR/POLL with IF set, any of NMI/INIT/SIPI/MCE, or an SMI
 * while not already in SMM. */
3434 static bool x86_cpu_has_work(CPUState *cs)
3436     X86CPU *cpu = X86_CPU(cs);
3437     CPUX86State *env = &cpu->env;
3439     return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3440                                       CPU_INTERRUPT_POLL)) &&
3441             (env->eflags & IF_MASK)) ||
3442            (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3443                                      CPU_INTERRUPT_INIT |
3444                                      CPU_INTERRUPT_SIPI |
3445                                      CPU_INTERRUPT_MCE)) ||
3446            ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3447             !(env->hflags & HF_SMM_MASK));
/* qdev properties of TYPE_X86_CPU. Topology IDs differ between *-user
 * (fixed 0) and system emulation (unassigned until board code sets
 * them). UINT32_MAX on the level properties means "not set by user";
 * realize then falls back to the computed min-level values. */
3450 static Property x86_cpu_properties[] = {
3451 #ifdef CONFIG_USER_ONLY
3452     /* apic_id = 0 by default for *-user, see commit 9886e834 */
3453     DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
3454     DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
3455     DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
3456     DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
3458     DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
3459     DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
3460     DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
3461     DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
3463     DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3464     { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
3465     DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3466     DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3467     DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3468     DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3469     DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3470     DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3471     DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3472     DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3473     DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3474     DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3475     DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3476     DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3477     DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
3478     DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
3479     DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
3480     DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
3481     DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
3482     DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
3483     DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
3484     DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
3485     DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
3486     DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
3487     DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3488     DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
3489     DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
3490     DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
3491     DEFINE_PROP_END_OF_LIST()
/* class_init of the abstract TYPE_X86_CPU: wire the qdev realize/
 * unrealize/props hooks (saving the parents for chaining) and fill in
 * every CPUClass callback implemented in this file. */
3494 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3496     X86CPUClass *xcc = X86_CPU_CLASS(oc);
3497     CPUClass *cc = CPU_CLASS(oc);
3498     DeviceClass *dc = DEVICE_CLASS(oc);
3500     xcc->parent_realize = dc->realize;
3501     dc->realize = x86_cpu_realizefn;
3502     dc->unrealize = x86_cpu_unrealizefn;
3503     dc->props = x86_cpu_properties;
3505     xcc->parent_reset = cc->reset;
3506     cc->reset = x86_cpu_reset;
3507     cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3509     cc->class_by_name = x86_cpu_class_by_name;
3510     cc->parse_features = x86_cpu_parse_featurestr;
3511     cc->has_work = x86_cpu_has_work;
3512     cc->do_interrupt = x86_cpu_do_interrupt;
3513     cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3514     cc->dump_state = x86_cpu_dump_state;
3515     cc->set_pc = x86_cpu_set_pc;
3516     cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3517     cc->gdb_read_register = x86_cpu_gdb_read_register;
3518     cc->gdb_write_register = x86_cpu_gdb_write_register;
3519     cc->get_arch_id = x86_cpu_get_arch_id;
3520     cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3521 #ifdef CONFIG_USER_ONLY
3522     cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3524     cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3525     cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3526     cc->write_elf64_note = x86_cpu_write_elf64_note;
3527     cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3528     cc->write_elf32_note = x86_cpu_write_elf32_note;
3529     cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3530     cc->vmsd = &vmstate_x86_cpu;
3532     cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3533 #ifndef CONFIG_USER_ONLY
3534     cc->debug_excp_handler = breakpoint_handler;
3536     cc->cpu_exec_enter = x86_cpu_exec_enter;
3537     cc->cpu_exec_exit = x86_cpu_exec_exit;
3539     dc->cannot_instantiate_with_device_add_yet = false;
3541      * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3542      * object in cpus -> dangling pointer after final object_unref().
3544     dc->cannot_destroy_with_object_finalize_yet = true;
/* Abstract base QOM type; concrete model types are registered from
 * builtin_x86_defs in x86_cpu_register_types() below. */
3547 static const TypeInfo x86_cpu_type_info = {
3548     .name = TYPE_X86_CPU,
3550     .instance_size = sizeof(X86CPU),
3551     .instance_init = x86_cpu_initfn,
3553     .class_size = sizeof(X86CPUClass),
3554     .class_init = x86_cpu_common_class_init,
/* type_init hook: register the abstract base type, one type per builtin
 * CPU model definition, and the KVM-only "host" CPU type. */
3557 static void x86_cpu_register_types(void)
3561     type_register_static(&x86_cpu_type_info);
3562     for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3563         x86_register_cpudef_type(&builtin_x86_defs[i]);
3566     type_register_static(&host_x86_cpu_type_info);
3570 type_init(x86_cpu_register_types)