/*
 *  i386 CPUID helper functions
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d


/* CPUID Leaf 4 constants: */

#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

#define CPUID_4_LEVEL(l)        ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)
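/*
 * For reference, a CPUID leaf 4 entry combines the fields above with the
 * cache geometry defined below; e.g. a level-1 data cache is described as
 *
 *     eax = CPUID_4_TYPE_DCACHE | CPUID_4_LEVEL(1) | CPUID_4_SELF_INIT_LEVEL;
 *     ebx = (line_size - 1) | ((partitions - 1) << 12) | ((associativity - 1) << 22);
 *
 * with the number of sets minus one in ECX and the flags above reported in EDX.
 */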
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
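/*
 * Example: a direct-mapped TLB or cache reports AMD_ENC_ASSOC(1) == 1, while
 * a fully associative one (ASSOC_FULL, 0xFF) is encoded as 0xF in the 4-bit
 * associativity fields of CPUID leaf 0x80000006.
 */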
/* Definitions of the hardcoded cache entries we expose: */

#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;

    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}
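/*
 * Example: for the vendor string "GenuineIntel" the three words are the
 * CPUID.0 register values EBX=0x756e6547 ("Genu"), EDX=0x49656e69 ("ineI")
 * and ECX=0x6c65746e ("ntel"), passed here in EBX/EDX/ECX order.
 */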
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};

static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};

/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};

static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *hyperv_priv_feature_name[] = {
    NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
    NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
    NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
    NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
    NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
    NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *hyperv_ident_feature_name[] = {
    NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
    NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
    NULL /* hv_post_messages */, NULL /* hv_signal_events */,
    NULL /* hv_create_port */, NULL /* hv_connect_port */,
    NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
    NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *hyperv_misc_feature_name[] = {
    NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
    NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
    NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
    NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
    "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
    "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
};

static const char *cpuid_7_0_ecx_feature_name[] = {
    NULL, NULL, "umip", "pku",
    "ospke", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, "rdpid", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *cpuid_6_feature_name[] = {
    NULL, NULL, "arat", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
typedef struct FeatureWordInfo {
    const char **feat_names;
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = ext_feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    [FEAT_8000_0001_EDX] = {
        .feat_names = ext2_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = ext3_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = ext4_feature_name,
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = kvm_feature_name,
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_HYPERV_EAX] = {
        .feat_names = hyperv_priv_feature_name,
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = hyperv_ident_feature_name,
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = hyperv_misc_feature_name,
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = svm_feature_name,
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = cpuid_7_0_ebx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = cpuid_7_0_ecx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = cpuid_apm_edx_feature_name,
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = cpuid_xsave_feature_name,
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = cpuid_6_feature_name,
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
};
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
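/* REGISTER(EAX), for instance, expands to
 *     [R_EAX] = { .name = "EAX", .qapi_enum = X86_CPU_REGISTER32_EAX }
 */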
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}
/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;
    int i;

    for (i = 0; i < 32; i++) {
        uint32_t f = 1U << i;
        /* If the feature name is unknown, it is not supported by QEMU yet */
        if (!wi->feat_names[i]) {
            continue;
        }
        /* Skip features known to QEMU, but explicitly marked as unmigratable */
        if (wi->unmigratable_flags & f) {
            continue;
        }
        r |= f;
    }
    return r;
}
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))

/* general substring compare of *[s1..e1) and *[s2..e2).  sx is start of
 * a substring.  ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
        if (!*s1 || !*s2 || *s1 != *s2)
        if (s1 == e1 && s2 == e2)

/* compare *[s..e) to *altstr.  *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right.  Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
    for (q = p = altstr; ; ) {
        while (*p && *p != '|')
            if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))

/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    uint32_t mask;
    const char **ppc;

    for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
        if (*ppc && !altcmp(s, e, *ppc)) {
            *pval |= mask;
            return true;
        }
    }
    return false;
}

static void add_flagname_to_bitmaps(const char *flagname,
                                    FeatureWordArray words,
                                    Error **errp)
{
    FeatureWord w;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        if (wi->feat_names &&
            lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
            break;
        }
    }
    if (w == FEATURE_WORDS) {
        error_setg(errp, "CPU feature %s not found", flagname);
    }
}
/* CPU class name definitions: */

#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
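/* e.g. x86_cpu_type_name("qemu64") yields "qemu64-x86_64-cpu" on the x86_64
 * target, where TYPE_X86_CPU expands to "x86_64-cpu". */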
static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;

    if (cpu_model == NULL) {
        return NULL;
    }

    typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}

static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));

    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}
struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    uint32_t xlevel2;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    char model_id[48];
};
static X86CPUDefinition builtin_x86_defs[] = {
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,

        .vendor = CPUID_VENDOR_AMD,
        /* Missing: CPUID_HT */
        .features[FEAT_1_EDX] =
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
            CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
            CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        /* Missing: CPUID_SVM_LBRV */
        .features[FEAT_SVM] =
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"

        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
        /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
         * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",

        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_HT */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
            CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
            CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
            CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .features[FEAT_8000_0001_ECX] =
        .xlevel = 0x80000008,
        .model_id = "Common KVM processor"

        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
        .features[FEAT_1_ECX] =
        .xlevel = 0x80000004,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,

        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
        .features[FEAT_1_ECX] =
        .features[FEAT_8000_0001_ECX] =
        .xlevel = 0x80000008,
        .model_id = "Common 32-bit KVM processor"

        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
        /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
         * CPUID_EXT_PDCM, CPUID_EXT_VMX */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .features[FEAT_8000_0001_EDX] =
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =

        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =

        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =

        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =

        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,

        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
            CPUID_ACPI | CPUID_SS,
        /* Some CPUs got no CPUID_SEP */
        /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
        .features[FEAT_8000_0001_EDX] =
        .features[FEAT_8000_0001_ECX] =
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",

        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
        .xlevel = 0x80000008,
        .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",

        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
        .xlevel = 0x80000008,
        .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",

        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
        .xlevel = 0x80000008,
        .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",

        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
        .features[FEAT_6_EAX] =
        .xlevel = 0x80000008,
        .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1092 .name = "SandyBridge",
1094 .vendor = CPUID_VENDOR_INTEL,
1098 .features[FEAT_1_EDX] =
1099 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1100 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1101 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1102 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1103 CPUID_DE | CPUID_FP87,
1104 .features[FEAT_1_ECX] =
1105 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1106 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1107 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1108 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1110 .features[FEAT_8000_0001_EDX] =
1111 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1113 .features[FEAT_8000_0001_ECX] =
1115 .features[FEAT_XSAVE] =
1116 CPUID_XSAVE_XSAVEOPT,
1117 .features[FEAT_6_EAX] =
1119 .xlevel = 0x80000008,
1120 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1123 .name = "IvyBridge",
1125 .vendor = CPUID_VENDOR_INTEL,
1129 .features[FEAT_1_EDX] =
1130 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1131 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1132 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1133 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1134 CPUID_DE | CPUID_FP87,
1135 .features[FEAT_1_ECX] =
1136 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1137 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1138 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1139 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1140 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1141 .features[FEAT_7_0_EBX] =
1142 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1144 .features[FEAT_8000_0001_EDX] =
1145 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1147 .features[FEAT_8000_0001_ECX] =
1149 .features[FEAT_XSAVE] =
1150 CPUID_XSAVE_XSAVEOPT,
1151 .features[FEAT_6_EAX] =
1153 .xlevel = 0x80000008,
1154 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1157 .name = "Haswell-noTSX",
1159 .vendor = CPUID_VENDOR_INTEL,
1163 .features[FEAT_1_EDX] =
1164 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1165 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1166 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1167 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1168 CPUID_DE | CPUID_FP87,
1169 .features[FEAT_1_ECX] =
1170 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1171 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1172 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1173 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1174 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1175 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1176 .features[FEAT_8000_0001_EDX] =
1177 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1179 .features[FEAT_8000_0001_ECX] =
1180 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1181 .features[FEAT_7_0_EBX] =
1182 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1183 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1184 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1185 .features[FEAT_XSAVE] =
1186 CPUID_XSAVE_XSAVEOPT,
1187 .features[FEAT_6_EAX] =
1189 .xlevel = 0x80000008,
1190 .model_id = "Intel Core Processor (Haswell, no TSX)",
1194 .vendor = CPUID_VENDOR_INTEL,
1198 .features[FEAT_1_EDX] =
1199 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1200 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1201 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1202 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1203 CPUID_DE | CPUID_FP87,
1204 .features[FEAT_1_ECX] =
1205 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1206 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1207 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1208 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1209 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1210 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1211 .features[FEAT_8000_0001_EDX] =
1212 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1214 .features[FEAT_8000_0001_ECX] =
1215 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1216 .features[FEAT_7_0_EBX] =
1217 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1218 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1219 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1221 .features[FEAT_XSAVE] =
1222 CPUID_XSAVE_XSAVEOPT,
1223 .features[FEAT_6_EAX] =
1225 .xlevel = 0x80000008,
1226 .model_id = "Intel Core Processor (Haswell)",
1229 .name = "Broadwell-noTSX",
1231 .vendor = CPUID_VENDOR_INTEL,
1235 .features[FEAT_1_EDX] =
1236 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1237 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1238 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1239 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1240 CPUID_DE | CPUID_FP87,
1241 .features[FEAT_1_ECX] =
1242 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1243 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1244 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1245 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1246 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1247 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1248 .features[FEAT_8000_0001_EDX] =
1249 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1251 .features[FEAT_8000_0001_ECX] =
1252 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1253 .features[FEAT_7_0_EBX] =
1254 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1255 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1256 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1257 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1259 .features[FEAT_XSAVE] =
1260 CPUID_XSAVE_XSAVEOPT,
1261 .features[FEAT_6_EAX] =
1263 .xlevel = 0x80000008,
1264 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1267 .name = "Broadwell",
1269 .vendor = CPUID_VENDOR_INTEL,
1273 .features[FEAT_1_EDX] =
1274 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1275 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1276 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1277 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1278 CPUID_DE | CPUID_FP87,
1279 .features[FEAT_1_ECX] =
1280 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1281 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1282 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1283 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1284 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1285 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1286 .features[FEAT_8000_0001_EDX] =
1287 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1289 .features[FEAT_8000_0001_ECX] =
1290 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1291 .features[FEAT_7_0_EBX] =
1292 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1293 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1294 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1295 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1297 .features[FEAT_XSAVE] =
1298 CPUID_XSAVE_XSAVEOPT,
1299 .features[FEAT_6_EAX] =
1301 .xlevel = 0x80000008,
1302 .model_id = "Intel Core Processor (Broadwell)",
1305 .name = "Skylake-Client",
1307 .vendor = CPUID_VENDOR_INTEL,
1311 .features[FEAT_1_EDX] =
1312 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1313 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1314 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1315 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1316 CPUID_DE | CPUID_FP87,
1317 .features[FEAT_1_ECX] =
1318 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1319 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1320 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1321 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1322 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1323 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1324 .features[FEAT_8000_0001_EDX] =
1325 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1327 .features[FEAT_8000_0001_ECX] =
1328 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1329 .features[FEAT_7_0_EBX] =
1330 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1331 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1332 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1333 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1334 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1335 /* Missing: XSAVES (not supported by some Linux versions,
1336 * including v4.1 to v4.6).
1337 * KVM doesn't yet expose any XSAVES state save component,
1338 * and the only one defined in Skylake (processor tracing)
1339 * probably will block migration anyway.
1341 .features[FEAT_XSAVE] =
1342 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1343 CPUID_XSAVE_XGETBV1,
1344 .features[FEAT_6_EAX] =
1346 .xlevel = 0x80000008,
1347 .model_id = "Intel Core Processor (Skylake)",
1350 .name = "Opteron_G1",
1352 .vendor = CPUID_VENDOR_AMD,
1356 .features[FEAT_1_EDX] =
1357 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1358 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1359 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1360 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1361 CPUID_DE | CPUID_FP87,
1362 .features[FEAT_1_ECX] =
1364 .features[FEAT_8000_0001_EDX] =
1365 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1366 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1367 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1368 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1369 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1370 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1371 .xlevel = 0x80000008,
1372 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1375 .name = "Opteron_G2",
1377 .vendor = CPUID_VENDOR_AMD,
1381 .features[FEAT_1_EDX] =
1382 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1383 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1384 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1385 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1386 CPUID_DE | CPUID_FP87,
1387 .features[FEAT_1_ECX] =
1388 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1389 /* Missing: CPUID_EXT2_RDTSCP */
1390 .features[FEAT_8000_0001_EDX] =
1391 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1392 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1393 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1394 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1395 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1396 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1397 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1398 .features[FEAT_8000_0001_ECX] =
1399 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1400 .xlevel = 0x80000008,
1401 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1404 .name = "Opteron_G3",
1406 .vendor = CPUID_VENDOR_AMD,
1410 .features[FEAT_1_EDX] =
1411 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1412 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1413 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1414 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1415 CPUID_DE | CPUID_FP87,
1416 .features[FEAT_1_ECX] =
1417 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1419 /* Missing: CPUID_EXT2_RDTSCP */
1420 .features[FEAT_8000_0001_EDX] =
1421 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1422 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1423 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1424 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1425 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1426 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1427 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1428 .features[FEAT_8000_0001_ECX] =
1429 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1430 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1431 .xlevel = 0x80000008,
1432 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1435 .name = "Opteron_G4",
1437 .vendor = CPUID_VENDOR_AMD,
1441 .features[FEAT_1_EDX] =
1442 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1443 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1444 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1445 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1446 CPUID_DE | CPUID_FP87,
1447 .features[FEAT_1_ECX] =
1448 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1449 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1450 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1452 /* Missing: CPUID_EXT2_RDTSCP */
1453 .features[FEAT_8000_0001_EDX] =
1455 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1456 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1457 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1458 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1459 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1460 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1461 .features[FEAT_8000_0001_ECX] =
1462 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1463 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1464 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1467 .xlevel = 0x8000001A,
1468 .model_id = "AMD Opteron 62xx class CPU",
1471 .name = "Opteron_G5",
1473 .vendor = CPUID_VENDOR_AMD,
1477 .features[FEAT_1_EDX] =
1478 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1479 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1480 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1481 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1482 CPUID_DE | CPUID_FP87,
1483 .features[FEAT_1_ECX] =
1484 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1485 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1486 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1487 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1488 /* Missing: CPUID_EXT2_RDTSCP */
1489 .features[FEAT_8000_0001_EDX] =
1491 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1492 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1493 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1494 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1495 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1496 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1497 .features[FEAT_8000_0001_ECX] =
1498 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1499 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1500 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1503 .xlevel = 0x8000001A,
1504 .model_id = "AMD Opteron 63xx class CPU",
typedef struct PropValue {
    const char *prop, *value;
} PropValue;

/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "monitor", "off" },

void x86_cpu_change_kvm_default(const char *prop, const char *value)
{
    PropValue *pv;
    for (pv = kvm_default_props; pv->prop; pv++) {
        if (!strcmp(pv->prop, prop)) {
            pv->value = value;
            break;
        }
    }

    /* It is valid to call this function only for properties that
     * are already present in the kvm_default_props table.
     */
    assert(pv->prop);
}

static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only);

static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }

    return !!(mce_cap & MCG_LMCE_P);
}

static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
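/*
 * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the 48-byte
 * brand string in EAX/EBX/ECX/EDX, e.g. "Intel(R) Xeon(R) CPU E5-2690 v2 @ 3.00GHz".
 */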
static X86CPUDefinition host_cpudef;

static Property host_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};

/* class_init for the "host" CPU model
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    xcc->kvm_required = true;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;
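    /*
     * Example: a host reporting CPUID.1 EAX = 0x000306c3 yields family
     * 0x6 + 0x0 = 6, model 0xc | (0x3 << 4) = 0x3c and stepping 3.
     */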
    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
    /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
    if (kvm_enabled()) {
        env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}

static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
static void report_unavailable_features(FeatureWord w, uint32_t mask)
{
    FeatureWordInfo *f = &feature_word_info[w];
    int i;

    for (i = 0; i < 32; ++i) {
        if ((1UL << i) & mask) {
            const char *reg = get_register_name_32(f->cpuid_reg);

            fprintf(stderr, "warning: %s doesn't support requested feature: "
                    "CPUID.%02XH:%s%s%s [bit %d]\n",
                    kvm_enabled() ? "host" : "TCG",
                    f->cpuid_eax, reg,
                    f->feat_names[i] ? "." : "",
                    f->feat_names[i] ? f->feat_names[i] : "", i);
        }
    }
}
static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 8) & 0xf;
    if (value == 0xf) {
        value += (env->cpuid_version >> 20) & 0xff;
    }
    visit_type_int(v, name, &value, errp);
}

static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
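/*
 * Example: "family=21" (0x15) does not fit the 4-bit base field, so it is
 * stored as base family 0xf plus extended family 0x06 in bits 27:20, while
 * "family=6" is stored directly in bits 11:8.
 */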
static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 4) & 0xf;
    value |= ((env->cpuid_version >> 16) & 0xf) << 4;
    visit_type_int(v, name, &value, errp);
}

static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
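/*
 * Example: "model=58" (0x3a, an Ivy Bridge model number) stores nibble 0xa
 * in bits 7:4 and the extended-model nibble 0x3 in bits 19:16.
 */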
static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = env->cpuid_version & 0xf;
    visit_type_int(v, name, &value, errp);
}

static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;

    value = g_malloc(CPUID_VENDOR_SZ + 1);
    x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
                             env->cpuid_vendor3);
    return value;
}

static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        env->cpuid_vendor1 |= ((uint8_t)value[i    ]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}
static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = g_malloc(48 + 1);
    for (i = 0; i < 48; i++) {
        value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
    }
    value[48] = '\0';
    return value;
}

static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
                                   Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int c, len, i;

    if (model_id == NULL) {
        model_id = "";
    }
    len = strlen(model_id);
    memset(env->cpuid_model, 0, 48);
    for (i = 0; i < 48; i++) {
        if (i >= len) {
            c = '\0';
        } else {
            c = (uint8_t)model_id[i];
        }
        env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
    }
}
static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, name, &value, errp);
}

static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
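/*
 * The property value is given in Hz but the TSC frequency is tracked in kHz,
 * so e.g. "tsc-frequency=2500000000" stores tsc_khz = 2500000 (2.5 GHz).
 */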
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}

static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value = cpu->hyperv_spinlock_attempts;

    visit_type_int(v, name, &value, errp);
}

static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}

static PropertyInfo qdev_prop_spinlocks = {
    .name  = "int",
    .get   = x86_get_hv_spinlocks,
    .set   = x86_set_hv_spinlocks,
};

/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}
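/* e.g. feat2prop() rewrites "kvm_nopiodelay" to "kvm-nopiodelay" before the
 * name is used as a QOM property. */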
1975 /* Compatibily hack to maintain legacy +-feat semantic,
1976 * where +-feat overwrites any feature set by
1977 * feat=on|feat even if the later is parsed after +-feat
1978 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
1980 static FeatureWordArray plus_features = { 0 };
1981 static FeatureWordArray minus_features = { 0 };
1983 /* Parse "+feature,-feature,feature=foo" CPU feature string
1985 static void x86_cpu_parse_featurestr(const char *typename, char *features,
1988 char *featurestr; /* Single 'key=value" string being parsed */
1989 Error *local_err = NULL;
1990 static bool cpu_globals_initialized;
1992 if (cpu_globals_initialized) {
1995 cpu_globals_initialized = true;
2001 for (featurestr = strtok(features, ",");
2002 featurestr && !local_err;
2003 featurestr = strtok(NULL, ",")) {
2005 const char *val = NULL;
2008 GlobalProperty *prop;
2010 /* Compatibility syntax: */
2011 if (featurestr[0] == '+') {
2012 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
2014 } else if (featurestr[0] == '-') {
2015 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
2019 eq = strchr(featurestr, '=');
2027 feat2prop(featurestr);
2031 if (!strcmp(name, "tsc-freq")) {
2035 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
2036 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
2037 if (tsc_freq < 0 || *err) {
2038 error_setg(errp, "bad numerical value %s", val);
2041 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2043 name = "tsc-frequency";
2046 prop = g_new0(typeof(*prop), 1);
2047 prop->driver = typename;
2048 prop->property = g_strdup(name);
2049 prop->value = g_strdup(val);
2050 prop->errp = &error_fatal;
2051 qdev_prop_register_global(prop);
2055 error_propagate(errp, local_err);
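/*
 * Illustrative sketch, not part of the original source: a stand-alone loop
 * showing the same strtok()-based splitting that x86_cpu_parse_featurestr()
 * applies to a "-cpu" feature string.  The classification mirrors the
 * parser's cases: legacy "+feat"/"-feat" flags, "prop=value" pairs, and
 * bare names (which the parser treats as "prop=on").  The function name is
 * made up for the example.
 */
#include <stdio.h>
#include <string.h>

static void featurestr_split_demo(void)
{
    char features[] = "+avx,-sse3,tsc-freq=2.5G,x2apic";  /* writable copy */
    char *featurestr;

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        if (featurestr[0] == '+' || featurestr[0] == '-') {
            printf("legacy flag : %s\n", featurestr);
        } else if (strchr(featurestr, '=')) {
            printf("property    : %s\n", featurestr);
        } else {
            printf("bare name   : %s (implies =on)\n", featurestr);
        }
    }
}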
2059 /* Print all cpuid feature names in featureset
2061 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2066 for (bit = 0; bit < 32; bit++) {
2067 if (featureset[bit]) {
2068 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2074 /* generate CPU information. */
2075 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2077 X86CPUDefinition *def;
2081 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2082 def = &builtin_x86_defs[i];
2083 snprintf(buf, sizeof(buf), "%s", def->name);
2084 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2087 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2088 "KVM processor with all supported host features "
2089 "(only available in KVM mode)");
2092 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2093 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2094 FeatureWordInfo *fw = &feature_word_info[i];
2096 (*cpu_fprintf)(f, " ");
2097 listflags(f, cpu_fprintf, fw->feat_names);
2098 (*cpu_fprintf)(f, "\n");
2102 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2104 CpuDefinitionInfoList *cpu_list = NULL;
2105 X86CPUDefinition *def;
2108 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2109 CpuDefinitionInfoList *entry;
2110 CpuDefinitionInfo *info;
2112 def = &builtin_x86_defs[i];
2113 info = g_malloc0(sizeof(*info));
2114 info->name = g_strdup(def->name);
2116 entry = g_malloc0(sizeof(*entry));
2117 entry->value = info;
2118 entry->next = cpu_list;
2125 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2126 bool migratable_only)
2128 FeatureWordInfo *wi = &feature_word_info[w];
2131 if (kvm_enabled()) {
2132 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2135 } else if (tcg_enabled()) {
2136 r = wi->tcg_features;
2140 if (migratable_only) {
2141 r &= x86_cpu_get_migratable_flags(w);
2147 * Filters CPU feature words based on host availability of each feature.
2149 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2151 static int x86_cpu_filter_features(X86CPU *cpu)
2153 CPUX86State *env = &cpu->env;
2157 for (w = 0; w < FEATURE_WORDS; w++) {
2158 uint32_t host_feat =
2159 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2160 uint32_t requested_features = env->features[w];
2161 env->features[w] &= host_feat;
2162 cpu->filtered_features[w] = requested_features & ~env->features[w];
2163 if (cpu->filtered_features[w]) {
2164 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2165 report_unavailable_features(w, cpu->filtered_features[w]);
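/*
 * Illustrative sketch, not part of the original source: the filtering above
 * is plain bit arithmetic.  With example values, features requested but not
 * supported by the host end up only in the "filtered" word:
 */
#include <stdint.h>
#include <assert.h>

static void filter_features_demo(void)
{
    uint32_t requested = 0x0000000f;   /* bits 0-3 requested              */
    uint32_t host      = 0x00000005;   /* host supports only bits 0 and 2 */

    uint32_t granted  = requested & host;        /* what the guest gets    */
    uint32_t filtered = requested & ~granted;    /* what had to be dropped */

    assert(granted == 0x00000005);
    assert(filtered == 0x0000000a);              /* bits 1 and 3 filtered  */
    (void)granted;
    (void)filtered;
}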
2174 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2177 for (pv = props; pv->prop; pv++) {
2181 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2186 /* Load data from X86CPUDefinition
2188 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2190 CPUX86State *env = &cpu->env;
2192 char host_vendor[CPUID_VENDOR_SZ + 1];
2195 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2196 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2197 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2198 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2199 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2200 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2201 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2202 for (w = 0; w < FEATURE_WORDS; w++) {
2203 env->features[w] = def->features[w];
2206 /* Special cases not set in the X86CPUDefinition structs: */
2207 if (kvm_enabled()) {
2208 if (!kvm_irqchip_in_kernel()) {
2209 x86_cpu_change_kvm_default("x2apic", "off");
2212 x86_cpu_apply_props(cpu, kvm_default_props);
2215 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2217 /* sysenter isn't supported in compatibility mode on AMD,
2218 * syscall isn't supported in compatibility mode on Intel.
2219 * Normally we advertise the actual CPU vendor, but you can
2220 * override this using the 'vendor' property if you want to use
2221 * KVM's sysenter/syscall emulation in compatibility mode and
2222 * when doing cross-vendor migration.
2224 vendor = def->vendor;
2225 if (kvm_enabled()) {
2226 uint32_t ebx = 0, ecx = 0, edx = 0;
2227 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2228 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2229 vendor = host_vendor;
2232 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2236 X86CPU *cpu_x86_init(const char *cpu_model)
2238 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
2241 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2243 X86CPUDefinition *cpudef = data;
2244 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2246 xcc->cpu_def = cpudef;
2249 static void x86_register_cpudef_type(X86CPUDefinition *def)
2251 char *typename = x86_cpu_type_name(def->name);
2254 .parent = TYPE_X86_CPU,
2255 .class_init = x86_cpu_cpudef_class_init,
2263 #if !defined(CONFIG_USER_ONLY)
2265 void cpu_clear_apic_feature(CPUX86State *env)
2267 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2270 #endif /* !CONFIG_USER_ONLY */
2272 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2273 uint32_t *eax, uint32_t *ebx,
2274 uint32_t *ecx, uint32_t *edx)
2276 X86CPU *cpu = x86_env_get_cpu(env);
2277 CPUState *cs = CPU(cpu);
2279 /* test if maximum index reached */
2280 if (index & 0x80000000) {
2281 if (index > env->cpuid_xlevel) {
2282 if (env->cpuid_xlevel2 > 0) {
2283 /* Handle the Centaur's CPUID instruction. */
2284 if (index > env->cpuid_xlevel2) {
2285 index = env->cpuid_xlevel2;
2286 } else if (index < 0xC0000000) {
2287 index = env->cpuid_xlevel;
2290 /* Intel documentation states that invalid EAX input will
2291 * return the same information as EAX=cpuid_level
2292 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2294 index = env->cpuid_level;
2298 if (index > env->cpuid_level)
2299 index = env->cpuid_level;
2304 *eax = env->cpuid_level;
2305 *ebx = env->cpuid_vendor1;
2306 *edx = env->cpuid_vendor2;
2307 *ecx = env->cpuid_vendor3;
2310 *eax = env->cpuid_version;
2311 *ebx = (cpu->apic_id << 24) |
2312 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2313 *ecx = env->features[FEAT_1_ECX];
2314 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2315 *ecx |= CPUID_EXT_OSXSAVE;
2317 *edx = env->features[FEAT_1_EDX];
2318 if (cs->nr_cores * cs->nr_threads > 1) {
2319 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
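/*
 * Illustrative sketch, not part of the original source: how a guest would
 * decode the EBX value assembled above for CPUID leaf 1.  The field layout
 * (initial APIC ID, logical processor count, CLFLUSH line size in 8-byte
 * units) is architectural; the function name is made up.
 */
#include <stdint.h>
#include <stdio.h>

static void cpuid1_ebx_decode_demo(uint32_t ebx)
{
    unsigned apic_id       = (ebx >> 24) & 0xff;
    unsigned logical_cpus  = (ebx >> 16) & 0xff;
    unsigned clflush_bytes = ((ebx >> 8) & 0xff) * 8;   /* 8 -> 64 bytes */

    printf("APIC ID %u, %u logical CPUs, CLFLUSH line %u bytes\n",
           apic_id, logical_cpus, clflush_bytes);
}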
2324 /* cache info: needed for Pentium Pro compatibility */
2325 if (cpu->cache_info_passthrough) {
2326 host_cpuid(index, 0, eax, ebx, ecx, edx);
2329 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2332 *edx = (L1D_DESCRIPTOR << 16) | \
2333 (L1I_DESCRIPTOR << 8) | \
2337 /* cache info: needed for Core compatibility */
2338 if (cpu->cache_info_passthrough) {
2339 host_cpuid(index, count, eax, ebx, ecx, edx);
2340 *eax &= ~0xFC000000;
2344 case 0: /* L1 dcache info */
2345 *eax |= CPUID_4_TYPE_DCACHE | \
2346 CPUID_4_LEVEL(1) | \
2347 CPUID_4_SELF_INIT_LEVEL;
2348 *ebx = (L1D_LINE_SIZE - 1) | \
2349 ((L1D_PARTITIONS - 1) << 12) | \
2350 ((L1D_ASSOCIATIVITY - 1) << 22);
2351 *ecx = L1D_SETS - 1;
2352 *edx = CPUID_4_NO_INVD_SHARING;
2354 case 1: /* L1 icache info */
2355 *eax |= CPUID_4_TYPE_ICACHE | \
2356 CPUID_4_LEVEL(1) | \
2357 CPUID_4_SELF_INIT_LEVEL;
2358 *ebx = (L1I_LINE_SIZE - 1) | \
2359 ((L1I_PARTITIONS - 1) << 12) | \
2360 ((L1I_ASSOCIATIVITY - 1) << 22);
2361 *ecx = L1I_SETS - 1;
2362 *edx = CPUID_4_NO_INVD_SHARING;
2364 case 2: /* L2 cache info */
2365 *eax |= CPUID_4_TYPE_UNIFIED | \
2366 CPUID_4_LEVEL(2) | \
2367 CPUID_4_SELF_INIT_LEVEL;
2368 if (cs->nr_threads > 1) {
2369 *eax |= (cs->nr_threads - 1) << 14;
2371 *ebx = (L2_LINE_SIZE - 1) | \
2372 ((L2_PARTITIONS - 1) << 12) | \
2373 ((L2_ASSOCIATIVITY - 1) << 22);
2375 *edx = CPUID_4_NO_INVD_SHARING;
2377 default: /* end of info */
2386 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2387 if ((*eax & 31) && cs->nr_cores > 1) {
2388 *eax |= (cs->nr_cores - 1) << 26;
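/*
 * Illustrative sketch, not part of the original source: deriving a cache
 * size from the leaf 4 fields encoded above.  The EBX fields and ECX are
 * all stored minus one, so the size is the product of the decoded values;
 * for example a 64-byte-line, 16-way, single-partition cache with 4096
 * sets works out to 64 * 16 * 1 * 4096 = 4 MiB.
 */
#include <stdint.h>

static uint64_t cpuid4_cache_size(uint32_t ebx, uint32_t ecx)
{
    uint64_t line_size     = (ebx & 0xfff) + 1;          /* bits 11:0  */
    uint64_t partitions    = ((ebx >> 12) & 0x3ff) + 1;  /* bits 21:12 */
    uint64_t associativity = ((ebx >> 22) & 0x3ff) + 1;  /* bits 31:22 */
    uint64_t sets          = (uint64_t)ecx + 1;

    return line_size * partitions * associativity * sets;
}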
2392 /* mwait info: needed for Core compatibility */
2393 *eax = 0; /* Smallest monitor-line size in bytes */
2394 *ebx = 0; /* Largest monitor-line size in bytes */
2395 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2399 /* Thermal and Power Leaf */
2400 *eax = env->features[FEAT_6_EAX];
2406 /* Structured Extended Feature Flags Enumeration Leaf */
2408 *eax = 0; /* Maximum ECX value for sub-leaves */
2409 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2410 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2411 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2412 *ecx |= CPUID_7_0_ECX_OSPKE;
2414 *edx = 0; /* Reserved */
2423 /* Direct Cache Access Information Leaf */
2424 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2430 /* Architectural Performance Monitoring Leaf */
2431 if (kvm_enabled() && cpu->enable_pmu) {
2432 KVMState *s = cs->kvm_state;
2434 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2435 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2436 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2437 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2446 /* Extended Topology Enumeration Leaf */
2447 if (!cpu->enable_cpuid_0xb) {
2448 *eax = *ebx = *ecx = *edx = 0;
2452 *ecx = count & 0xff;
2453 *edx = cpu->apic_id;
2457 *eax = apicid_core_offset(smp_cores, smp_threads);
2459 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2462 *eax = apicid_pkg_offset(smp_cores, smp_threads);
2463 *ebx = smp_cores * smp_threads;
2464 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2469 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2472 assert(!(*eax & ~0x1f));
2473 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
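/*
 * Illustrative sketch, not part of the original source: how a guest uses
 * the leaf 0xB shift widths (returned in EAX above) to decompose its APIC
 * ID.  smt_shift corresponds to the SMT sub-leaf and core_shift to the
 * core sub-leaf (core_shift >= smt_shift); the function and parameter
 * names are made up.  E.g. smt_shift=1, core_shift=3, APIC ID 5 gives
 * pkg 0, core 2, thread 1.
 */
#include <stdint.h>
#include <stdio.h>

static void apic_id_decompose_demo(uint32_t apic_id,
                                   unsigned smt_shift, unsigned core_shift)
{
    uint32_t thread_id = apic_id & ((1u << smt_shift) - 1);
    uint32_t core_id   = (apic_id >> smt_shift) &
                         ((1u << (core_shift - smt_shift)) - 1);
    uint32_t pkg_id    = apic_id >> core_shift;

    printf("pkg %u, core %u, thread %u\n", pkg_id, core_id, thread_id);
}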
2476 KVMState *s = cs->kvm_state;
2480 /* Processor Extended State */
2485 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2488 if (kvm_enabled()) {
2489 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2491 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2498 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2499 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2500 if ((env->features[esa->feature] & esa->bits) == esa->bits
2501 && ((ena_mask >> i) & 1) != 0) {
2505 *edx |= 1u << (i - 32);
2507 *ecx = MAX(*ecx, esa->offset + esa->size);
2510 *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2512 } else if (count == 1) {
2513 *eax = env->features[FEAT_XSAVE];
2514 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2515 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2516 if ((env->features[esa->feature] & esa->bits) == esa->bits
2517 && ((ena_mask >> count) & 1) != 0) {
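/*
 * Illustrative sketch, not part of the original source: sub-leaf 0 of
 * CPUID leaf 0xD splits the supported XCR0 mask across EAX (bits 31:0) and
 * EDX (bits 63:32), exactly as assembled above.  A guest recombines them
 * like this (the helper name is made up):
 */
#include <stdint.h>

static uint64_t xcr0_supported_mask(uint32_t eax, uint32_t edx)
{
    return ((uint64_t)edx << 32) | eax;
}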
2525 *eax = env->cpuid_xlevel;
2526 *ebx = env->cpuid_vendor1;
2527 *edx = env->cpuid_vendor2;
2528 *ecx = env->cpuid_vendor3;
2531 *eax = env->cpuid_version;
2533 *ecx = env->features[FEAT_8000_0001_ECX];
2534 *edx = env->features[FEAT_8000_0001_EDX];
2536 /* The Linux kernel checks for the CMPLegacy bit and
2537 * discards multiple thread information if it is set.
2538 * So don't set it here for Intel to make Linux guests happy.
2540 if (cs->nr_cores * cs->nr_threads > 1) {
2541 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2542 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2543 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2544 *ecx |= 1 << 1; /* CmpLegacy bit */
2551 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2552 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2553 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2554 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2557 /* cache info (L1 cache) */
2558 if (cpu->cache_info_passthrough) {
2559 host_cpuid(index, 0, eax, ebx, ecx, edx);
2562 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2563 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2564 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2565 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2566 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2567 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2568 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2569 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2572 /* cache info (L2 cache) */
2573 if (cpu->cache_info_passthrough) {
2574 host_cpuid(index, 0, eax, ebx, ecx, edx);
2577 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2578 (L2_DTLB_2M_ENTRIES << 16) | \
2579 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2580 (L2_ITLB_2M_ENTRIES);
2581 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2582 (L2_DTLB_4K_ENTRIES << 16) | \
2583 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2584 (L2_ITLB_4K_ENTRIES);
2585 *ecx = (L2_SIZE_KB_AMD << 16) | \
2586 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2587 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2588 *edx = ((L3_SIZE_KB/512) << 18) | \
2589 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2590 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
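/*
 * Illustrative sketch, not part of the original source: decoding the ECX
 * value packed above for CPUID 0x80000006 (L2 cache).  Bits 31:16 give the
 * size in KB, 15:12 the AMD-encoded associativity (see AMD_ENC_ASSOC()),
 * 11:8 the lines per tag, and 7:0 the line size in bytes.  The function
 * name is made up.
 */
#include <stdint.h>
#include <stdio.h>

static void amd_l2_decode_demo(uint32_t ecx)
{
    unsigned size_kb       = ecx >> 16;
    unsigned assoc_enc     = (ecx >> 12) & 0xf;
    unsigned lines_per_tag = (ecx >> 8) & 0xf;
    unsigned line_size     = ecx & 0xff;

    printf("L2: %u KB, assoc code 0x%x, %u lines/tag, %u-byte lines\n",
           size_kb, assoc_enc, lines_per_tag, line_size);
}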
2596 *edx = env->features[FEAT_8000_0007_EDX];
2599 /* virtual & phys address size in low 2 bytes. */
2600 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2601 /* 64 bit processor, 48 bits virtual, configurable physical bits. */
2604 *eax = 0x00003000 + cpu->phys_bits;
2606 *eax = cpu->phys_bits;
2611 if (cs->nr_cores * cs->nr_threads > 1) {
2612 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
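/*
 * Illustrative sketch, not part of the original source: the EAX value
 * built above packs the physical address width into bits 7:0 and the
 * virtual address width into bits 15:8, so 0x00003000 + phys_bits
 * advertises 48 virtual bits alongside the configured physical bits.  The
 * function name is made up.
 */
#include <stdint.h>
#include <stdio.h>

static void addr_width_decode_demo(uint32_t eax)
{
    unsigned phys_bits = eax & 0xff;
    unsigned virt_bits = (eax >> 8) & 0xff;

    printf("%u physical / %u virtual address bits\n", phys_bits, virt_bits);
}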
2616 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2617 *eax = 0x00000001; /* SVM Revision */
2618 *ebx = 0x00000010; /* nr of ASIDs */
2620 *edx = env->features[FEAT_SVM]; /* optional features */
2629 *eax = env->cpuid_xlevel2;
2635 /* Support for VIA CPU's CPUID instruction */
2636 *eax = env->cpuid_version;
2639 *edx = env->features[FEAT_C000_0001_EDX];
2644 /* Reserved for the future, and now filled with zero */
2651 /* reserved values: zero */
2660 /* CPUClass::reset() */
2661 static void x86_cpu_reset(CPUState *s)
2663 X86CPU *cpu = X86_CPU(s);
2664 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2665 CPUX86State *env = &cpu->env;
2670 xcc->parent_reset(s);
2672 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2676 env->old_exception = -1;
2678 /* init to reset state */
2680 env->hflags2 |= HF2_GIF_MASK;
2682 cpu_x86_update_cr0(env, 0x60000010);
2683 env->a20_mask = ~0x0;
2684 env->smbase = 0x30000;
2686 env->idt.limit = 0xffff;
2687 env->gdt.limit = 0xffff;
2688 env->ldt.limit = 0xffff;
2689 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2690 env->tr.limit = 0xffff;
2691 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2693 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2694 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2695 DESC_R_MASK | DESC_A_MASK);
2696 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2697 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2699 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2700 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2702 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2703 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2705 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2706 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2708 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2709 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2713 env->regs[R_EDX] = env->cpuid_version;
2718 for (i = 0; i < 8; i++) {
2721 cpu_set_fpuc(env, 0x37f);
2723 env->mxcsr = 0x1f80;
2724 /* All units are in INIT state. */
2727 env->pat = 0x0007040600070406ULL;
2728 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2730 memset(env->dr, 0, sizeof(env->dr));
2731 env->dr[6] = DR6_FIXED_1;
2732 env->dr[7] = DR7_FIXED_1;
2733 cpu_breakpoint_remove_all(s, BP_CPU);
2734 cpu_watchpoint_remove_all(s, BP_CPU);
2737 xcr0 = XSTATE_FP_MASK;
2739 #ifdef CONFIG_USER_ONLY
2740 /* Enable all the features for user-mode. */
2741 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2742 xcr0 |= XSTATE_SSE_MASK;
2744 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2745 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2746 if ((env->features[esa->feature] & esa->bits) == esa->bits) {
2751 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2752 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2754 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2755 cr4 |= CR4_FSGSBASE_MASK;
2760 cpu_x86_update_cr4(env, cr4);
2763 * SDM 11.11.5 requires:
2764 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2765 * - IA32_MTRR_PHYSMASKn.V = 0
2766 * All other bits are undefined. For simplification, zero it all.
2768 env->mtrr_deftype = 0;
2769 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2770 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2772 #if !defined(CONFIG_USER_ONLY)
2773 /* We hard-wire the BSP to the first CPU. */
2774 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2776 s->halted = !cpu_is_bsp(cpu);
2778 if (kvm_enabled()) {
2779 kvm_arch_reset_vcpu(cpu);
2784 #ifndef CONFIG_USER_ONLY
2785 bool cpu_is_bsp(X86CPU *cpu)
2787 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2790 /* TODO: remove me, when reset over QOM tree is implemented */
2791 static void x86_cpu_machine_reset_cb(void *opaque)
2793 X86CPU *cpu = opaque;
2794 cpu_reset(CPU(cpu));
2798 static void mce_init(X86CPU *cpu)
2800 CPUX86State *cenv = &cpu->env;
2803 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2804 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2805 (CPUID_MCE | CPUID_MCA)) {
2806 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
2807 (cpu->enable_lmce ? MCG_LMCE_P : 0);
2808 cenv->mcg_ctl = ~(uint64_t)0;
2809 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2810 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
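/*
 * Illustrative sketch, not part of the original source: each MCA bank is
 * described by four consecutive entries in mce_banks[], mirroring the
 * IA32_MCi_{CTL,STATUS,ADDR,MISC} MSR layout, which is why the loop above
 * indexes bank * 4 to reach a bank's CTL register.  The helper name below
 * is made up for the example.
 */
#include <stdint.h>

static uint64_t *mce_bank_reg(uint64_t *mce_banks, int bank, int reg)
{
    /* reg: 0 = CTL, 1 = STATUS, 2 = ADDR, 3 = MISC */
    return &mce_banks[bank * 4 + reg];
}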
2815 #ifndef CONFIG_USER_ONLY
2816 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2818 APICCommonState *apic;
2819 const char *apic_type = "apic";
2821 if (kvm_apic_in_kernel()) {
2822 apic_type = "kvm-apic";
2823 } else if (xen_enabled()) {
2824 apic_type = "xen-apic";
2827 cpu->apic_state = DEVICE(object_new(apic_type));
2829 object_property_add_child(OBJECT(cpu), "lapic",
2830 OBJECT(cpu->apic_state), &error_abort);
2831 object_unref(OBJECT(cpu->apic_state));
2833 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2834 /* TODO: convert to link<> */
2835 apic = APIC_COMMON(cpu->apic_state);
2837 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2840 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2842 APICCommonState *apic;
2843 static bool apic_mmio_map_once;
2845 if (cpu->apic_state == NULL) {
2848 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2851 /* Map APIC MMIO area */
2852 apic = APIC_COMMON(cpu->apic_state);
2853 if (!apic_mmio_map_once) {
2854 memory_region_add_subregion_overlap(get_system_memory(),
2856 MSR_IA32_APICBASE_BASE,
2859 apic_mmio_map_once = true;
2863 static void x86_cpu_machine_done(Notifier *n, void *unused)
2865 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2866 MemoryRegion *smram =
2867 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2870 cpu->smram = g_new(MemoryRegion, 1);
2871 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2872 smram, 0, 1ull << 32);
2873 memory_region_set_enabled(cpu->smram, false);
2874 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2878 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2883 /* Note: Only safe for use on x86(-64) hosts */
2884 static uint32_t x86_host_phys_bits(void)
2887 uint32_t host_phys_bits;
2889 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
2890 if (eax >= 0x80000008) {
2891 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
2892 /* Note: According to AMD doc 25481 rev 2.34 they have a field
2893 * at 23:16 that can specify the maximum physical address bits for
2894 * the guest, which can override this value; but I've not seen
2895 * anything with that set.
2897 host_phys_bits = eax & 0xff;
2899 /* It's an odd 64 bit machine that doesn't have the leaf for
2900 * physical address bits; fall back to 36, which most older Intel parts used.
2903 host_phys_bits = 36;
2906 return host_phys_bits;
2909 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2910 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2911 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2912 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2913 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2914 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2915 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2917 CPUState *cs = CPU(dev);
2918 X86CPU *cpu = X86_CPU(dev);
2919 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2920 CPUX86State *env = &cpu->env;
2921 Error *local_err = NULL;
2922 static bool ht_warned;
2925 if (xcc->kvm_required && !kvm_enabled()) {
2926 char *name = x86_cpu_class_get_model_name(xcc);
2927 error_setg(&local_err, "CPU model '%s' requires KVM", name);
2932 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
2933 error_setg(errp, "apic-id property was not initialized properly");
2937 /* TODO: cpu->host_features incorrectly overwrites features
2938 * set using "feat=on|off". Once we fix this, we can convert
2939 * plus_features & minus_features to global properties
2940 * inside x86_cpu_parse_featurestr() too.
2942 if (cpu->host_features) {
2943 for (w = 0; w < FEATURE_WORDS; w++) {
2945 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2949 for (w = 0; w < FEATURE_WORDS; w++) {
2950 cpu->env.features[w] |= plus_features[w];
2951 cpu->env.features[w] &= ~minus_features[w];
2954 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2955 env->cpuid_level = 7;
2958 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2959 error_setg(&local_err,
2961 "Host doesn't support requested features" :
2962 "TCG doesn't support requested features");
2966 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2969 if (IS_AMD_CPU(env)) {
2970 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2971 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2972 & CPUID_EXT2_AMD_ALIASES);
2975 /* For 64-bit systems, think about the number of physical bits to present.
2976 * Ideally this should be the same as the host; anything other than matching
2977 * the host can cause incorrect guest behaviour.
2978 * QEMU used to pick the magic value of 40 bits, which corresponds to
2979 * consumer AMD devices but nothing else.
2981 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2982 if (kvm_enabled()) {
2983 uint32_t host_phys_bits = x86_host_phys_bits();
2986 if (cpu->host_phys_bits) {
2987 /* The user asked for us to use the host physical bits */
2988 cpu->phys_bits = host_phys_bits;
2991 /* Print a warning if the user set it to a value that's not the host value. */
2994 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
2996 error_report("Warning: Host physical bits (%u)"
2997 " does not match phys-bits property (%u)",
2998 host_phys_bits, cpu->phys_bits);
3002 if (cpu->phys_bits &&
3003 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
3004 cpu->phys_bits < 32)) {
3005 error_setg(errp, "phys-bits should be between 32 and %u "
3007 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
3011 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
3012 error_setg(errp, "TCG only supports phys-bits=%u",
3013 TCG_PHYS_ADDR_BITS);
3017 /* 0 means it was not explicitly set by the user (or by machine
3018 * compat_props or by the host code above). In this case, the default
3019 * is the value used by TCG (40).
3021 if (cpu->phys_bits == 0) {
3022 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
3025 /* For 32-bit systems, don't use the user-set value, but keep
3026 * phys_bits consistent with what we tell the guest.
3028 if (cpu->phys_bits != 0) {
3029 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
3033 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
3034 cpu->phys_bits = 36;
3036 cpu->phys_bits = 32;
3039 cpu_exec_init(cs, &error_abort);
3041 if (tcg_enabled()) {
3045 #ifndef CONFIG_USER_ONLY
3046 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
3048 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
3049 x86_cpu_apic_create(cpu, &local_err);
3050 if (local_err != NULL) {
3058 #ifndef CONFIG_USER_ONLY
3059 if (tcg_enabled()) {
3060 AddressSpace *newas = g_new(AddressSpace, 1);
3062 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
3063 cpu->cpu_as_root = g_new(MemoryRegion, 1);
3065 /* Outer container... */
3066 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
3067 memory_region_set_enabled(cpu->cpu_as_root, true);
3069 /* ... with two regions inside: normal system memory with low priority, and ... */
3072 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
3073 get_system_memory(), 0, ~0ull);
3074 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
3075 memory_region_set_enabled(cpu->cpu_as_mem, true);
3076 address_space_init(newas, cpu->cpu_as_root, "CPU");
3078 cpu_address_space_init(cs, newas, 0);
3080 /* ... SMRAM with higher priority, linked from /machine/smram. */
3081 cpu->machine_done.notify = x86_cpu_machine_done;
3082 qemu_add_machine_init_done_notifier(&cpu->machine_done);
3088 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3089 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3090 * based on inputs (sockets, cores, threads), it is still better to give
3091 * users a warning.
3093 * NOTE: the following code has to follow qemu_init_vcpu(); otherwise
3094 * cs->nr_threads hasn't been populated yet and the check is incorrect.
3096 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3097 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3098 " -smp options properly.");
3102 x86_cpu_apic_realize(cpu, &local_err);
3103 if (local_err != NULL) {
3108 xcc->parent_realize(dev, &local_err);
3111 if (local_err != NULL) {
3112 error_propagate(errp, local_err);
3117 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3119 X86CPU *cpu = X86_CPU(dev);
3121 #ifndef CONFIG_USER_ONLY
3122 cpu_remove_sync(CPU(dev));
3123 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3126 if (cpu->apic_state) {
3127 object_unparent(OBJECT(cpu->apic_state));
3128 cpu->apic_state = NULL;
3132 typedef struct BitProperty {
3137 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3138 void *opaque, Error **errp)
3140 BitProperty *fp = opaque;
3141 bool value = (*fp->ptr & fp->mask) == fp->mask;
3142 visit_type_bool(v, name, &value, errp);
3145 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3146 void *opaque, Error **errp)
3148 DeviceState *dev = DEVICE(obj);
3149 BitProperty *fp = opaque;
3150 Error *local_err = NULL;
3153 if (dev->realized) {
3154 qdev_prop_set_after_realize(dev, name, errp);
3158 visit_type_bool(v, name, &value, &local_err);
3160 error_propagate(errp, local_err);
3165 *fp->ptr |= fp->mask;
3167 *fp->ptr &= ~fp->mask;
3171 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3174 BitProperty *prop = opaque;
3178 /* Register a boolean property to get/set a single bit in a uint32_t field.
3180 * The same property name can be registered multiple times to make it affect
3181 * multiple bits in the same FeatureWord. In that case, the getter will return
3182 * true only if all bits are set.
3184 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3185 const char *prop_name,
3191 uint32_t mask = (1UL << bitnr);
3193 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3196 assert(fp->ptr == field);
3199 fp = g_new0(BitProperty, 1);
3202 object_property_add(OBJECT(cpu), prop_name, "bool",
3203 x86_cpu_get_bit_prop,
3204 x86_cpu_set_bit_prop,
3205 x86_cpu_release_bit_prop, fp, &error_abort);
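/*
 * Illustrative sketch, not part of the original source: when the same
 * property name is registered for several bits, the masks accumulate, and
 * the getter above reports true only if every covered bit is set.  The
 * function name is made up.
 */
#include <stdint.h>
#include <stdbool.h>

static bool bit_prop_all_set_demo(uint32_t field, uint32_t mask)
{
    return (field & mask) == mask;   /* all covered bits must be set */
}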
3209 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3213 Object *obj = OBJECT(cpu);
3216 FeatureWordInfo *fi = &feature_word_info[w];
3218 if (!fi->feat_names) {
3221 if (!fi->feat_names[bitnr]) {
3225 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3227 feat2prop(names[0]);
3228 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3230 for (i = 1; names[i]; i++) {
3231 feat2prop(names[i]);
3232 object_property_add_alias(obj, names[i], obj, names[0],
3239 static void x86_cpu_initfn(Object *obj)
3241 CPUState *cs = CPU(obj);
3242 X86CPU *cpu = X86_CPU(obj);
3243 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3244 CPUX86State *env = &cpu->env;
3249 object_property_add(obj, "family", "int",
3250 x86_cpuid_version_get_family,
3251 x86_cpuid_version_set_family, NULL, NULL, NULL);
3252 object_property_add(obj, "model", "int",
3253 x86_cpuid_version_get_model,
3254 x86_cpuid_version_set_model, NULL, NULL, NULL);
3255 object_property_add(obj, "stepping", "int",
3256 x86_cpuid_version_get_stepping,
3257 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3258 object_property_add_str(obj, "vendor",
3259 x86_cpuid_get_vendor,
3260 x86_cpuid_set_vendor, NULL);
3261 object_property_add_str(obj, "model-id",
3262 x86_cpuid_get_model_id,
3263 x86_cpuid_set_model_id, NULL);
3264 object_property_add(obj, "tsc-frequency", "int",
3265 x86_cpuid_get_tsc_freq,
3266 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3267 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3268 x86_cpu_get_feature_words,
3269 NULL, NULL, (void *)env->features, NULL);
3270 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3271 x86_cpu_get_feature_words,
3272 NULL, NULL, (void *)cpu->filtered_features, NULL);
3274 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3276 for (w = 0; w < FEATURE_WORDS; w++) {
3279 for (bitnr = 0; bitnr < 32; bitnr++) {
3280 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3284 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3287 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3289 X86CPU *cpu = X86_CPU(cs);
3291 return cpu->apic_id;
3294 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3296 X86CPU *cpu = X86_CPU(cs);
3298 return cpu->env.cr[0] & CR0_PG_MASK;
3301 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3303 X86CPU *cpu = X86_CPU(cs);
3305 cpu->env.eip = value;
3308 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3310 X86CPU *cpu = X86_CPU(cs);
3312 cpu->env.eip = tb->pc - tb->cs_base;
3315 static bool x86_cpu_has_work(CPUState *cs)
3317 X86CPU *cpu = X86_CPU(cs);
3318 CPUX86State *env = &cpu->env;
3320 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3321 CPU_INTERRUPT_POLL)) &&
3322 (env->eflags & IF_MASK)) ||
3323 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3324 CPU_INTERRUPT_INIT |
3325 CPU_INTERRUPT_SIPI |
3326 CPU_INTERRUPT_MCE)) ||
3327 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3328 !(env->hflags & HF_SMM_MASK));
3331 static Property x86_cpu_properties[] = {
3332 #ifdef CONFIG_USER_ONLY
3333 /* apic_id = 0 by default for *-user, see commit 9886e834 */
3334 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
3335 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
3336 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
3337 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
3339 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
3340 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
3341 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
3342 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
3344 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3345 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3346 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3347 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3348 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3349 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3350 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3351 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3352 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3353 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3354 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3355 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3356 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3357 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3358 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
3359 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
3360 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
3361 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3362 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3363 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3364 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3365 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
3366 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
3367 DEFINE_PROP_END_OF_LIST()
3370 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3372 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3373 CPUClass *cc = CPU_CLASS(oc);
3374 DeviceClass *dc = DEVICE_CLASS(oc);
3376 xcc->parent_realize = dc->realize;
3377 dc->realize = x86_cpu_realizefn;
3378 dc->unrealize = x86_cpu_unrealizefn;
3379 dc->props = x86_cpu_properties;
3381 xcc->parent_reset = cc->reset;
3382 cc->reset = x86_cpu_reset;
3383 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3385 cc->class_by_name = x86_cpu_class_by_name;
3386 cc->parse_features = x86_cpu_parse_featurestr;
3387 cc->has_work = x86_cpu_has_work;
3388 cc->do_interrupt = x86_cpu_do_interrupt;
3389 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3390 cc->dump_state = x86_cpu_dump_state;
3391 cc->set_pc = x86_cpu_set_pc;
3392 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3393 cc->gdb_read_register = x86_cpu_gdb_read_register;
3394 cc->gdb_write_register = x86_cpu_gdb_write_register;
3395 cc->get_arch_id = x86_cpu_get_arch_id;
3396 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3397 #ifdef CONFIG_USER_ONLY
3398 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3400 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3401 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3402 cc->write_elf64_note = x86_cpu_write_elf64_note;
3403 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3404 cc->write_elf32_note = x86_cpu_write_elf32_note;
3405 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3406 cc->vmsd = &vmstate_x86_cpu;
3408 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3409 #ifndef CONFIG_USER_ONLY
3410 cc->debug_excp_handler = breakpoint_handler;
3412 cc->cpu_exec_enter = x86_cpu_exec_enter;
3413 cc->cpu_exec_exit = x86_cpu_exec_exit;
3415 dc->cannot_instantiate_with_device_add_yet = false;
3417 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3418 * object in cpus -> dangling pointer after final object_unref().
3420 dc->cannot_destroy_with_object_finalize_yet = true;
3423 static const TypeInfo x86_cpu_type_info = {
3424 .name = TYPE_X86_CPU,
3426 .instance_size = sizeof(X86CPU),
3427 .instance_init = x86_cpu_initfn,
3429 .class_size = sizeof(X86CPUClass),
3430 .class_init = x86_cpu_common_class_init,
3433 static void x86_cpu_register_types(void)
3437 type_register_static(&x86_cpu_type_info);
3438 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3439 x86_register_cpudef_type(&builtin_x86_defs[i]);
3442 type_register_static(&host_x86_cpu_type_info);
3446 type_init(x86_cpu_register_types)