2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */
#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B 0x7d

/* CPUID Leaf 4 constants: */

/* EAX: cache type field */
#define CPUID_4_TYPE_DCACHE 1
#define CPUID_4_TYPE_ICACHE 2
#define CPUID_4_TYPE_UNIFIED 3
/* EAX: cache level, bits 7:5 */
#define CPUID_4_LEVEL(l) ((l) << 5)
#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC (1 << 9)
/* EDX: cache properties */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE (1 << 1)
#define CPUID_4_COMPLEX_IDX (1 << 2)

/* Marker for a fully-associative cache in the AMD encoding below */
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
/* NOTE(review): the intermediate power-of-two cases (2, 4, 8, 16, ...)
 * appear to be missing from this copy of the macro -- compare against the
 * AMD CPUID specification for leaf 0x80000006 before building. */
#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
/* Definitions of the hardcoded cache entries we expose: */
/* NOTE(review): the L1D_SETS/L1I_SETS/L2_SETS definitions referenced by
 * the "Size = ..." comments are missing from this copy. */

/* L1 data cache: */
#define L1D_LINE_SIZE 64
#define L1D_ASSOCIATIVITY 8
#define L1D_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG 1
#define L1D_SIZE_KB_AMD 64
#define L1D_ASSOCIATIVITY_AMD 2

/* L1 instruction cache: */
#define L1I_LINE_SIZE 64
#define L1I_ASSOCIATIVITY 8
#define L1I_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG 1
#define L1I_SIZE_KB_AMD 64
#define L1I_ASSOCIATIVITY_AMD 2

/* Level 2 unified cache: */
#define L2_LINE_SIZE 64
#define L2_ASSOCIATIVITY 16
#define L2_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG 1
#define L2_SIZE_KB_AMD 512

/* Level 3 cache (not exposed): */
#define L3_SIZE_KB 0 /* disabled */
#define L3_ASSOCIATIVITY 0 /* disabled */
#define L3_LINES_PER_TAG 0 /* disabled */
#define L3_LINE_SIZE 0 /* disabled */

/* TLB definitions: */

/* L1 data TLB: */
#define L1_DTLB_2M_ASSOC 1
#define L1_DTLB_2M_ENTRIES 255
#define L1_DTLB_4K_ASSOC 1
#define L1_DTLB_4K_ENTRIES 255

/* L1 instruction TLB: */
#define L1_ITLB_2M_ASSOC 1
#define L1_ITLB_2M_ENTRIES 255
#define L1_ITLB_4K_ASSOC 1
#define L1_ITLB_4K_ENTRIES 255

/* L2 data TLB: */
#define L2_DTLB_2M_ASSOC 0 /* disabled */
#define L2_DTLB_2M_ENTRIES 0 /* disabled */
#define L2_DTLB_4K_ASSOC 4
#define L2_DTLB_4K_ENTRIES 512

/* L2 instruction TLB: */
#define L2_ITLB_2M_ASSOC 0 /* disabled */
#define L2_ITLB_2M_ENTRIES 0 /* disabled */
#define L2_ITLB_4K_ASSOC 4
#define L2_ITLB_4K_ENTRIES 512
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
173 dst[CPUID_VENDOR_SZ] = '\0';
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
/* CPUID[1].EDX feature names, indexed by bit position (NULL = no name). */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* CPUID[1].ECX feature names; "a|b" entries are accepted name aliases. */
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};
/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[].  They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};
/* CPUID[8000_0001].ECX (AMD extended) feature names. */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[C000_0001].EDX (VIA/Centaur PadLock) feature names. */
static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[4000_0001].EAX (KVM paravirtual) feature names.
 * "kvmclock" appears twice intentionally: bits 0 and 3 are two distinct
 * clocksource feature bits that both map to the same user-visible name.
 */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[4000_0003].EAX (Hyper-V privilege) feature names.  All entries are
 * still NULL: the bits are documented in comments but not yet nameable by
 * users.
 */
static const char *hyperv_priv_feature_name[] = {
    NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
    NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
    NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
    NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
    NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
    NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[4000_0003].EBX (Hyper-V partition privilege) feature names; all
 * entries NULL (documented only in comments).
 * NOTE(review): one interior initializer line and the closing "};" appear
 * to be missing from this copy (only 30 of 32 entries are present) --
 * restore from upstream before building.
 */
static const char *hyperv_ident_feature_name[] = {
    NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
    NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
    NULL /* hv_post_messages */, NULL /* hv_signal_events */,
    NULL /* hv_create_port */, NULL /* hv_connect_port */,
    NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
    NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
/* CPUID[4000_0003].EDX (Hyper-V misc) feature names; all entries NULL
 * (documented only in comments).
 * NOTE(review): one interior initializer line and the closing "};" appear
 * to be missing from this copy (only 30 of 32 entries are present) --
 * restore from upstream before building.
 */
static const char *hyperv_misc_feature_name[] = {
    NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
    NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
    NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
    NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
/* CPUID[8000_000A].EDX (SVM sub-feature) names. */
static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[EAX=7,ECX=0].EBX feature names. */
static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1",
    "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm",
    NULL, NULL, "mpx", NULL,
    "avx512f", "avx512dq", "rdseed", "adx",
    "smap", "avx512ifma", "pcommit", "clflushopt",
    "clwb", NULL, "avx512pf", "avx512er",
    "avx512cd", NULL, "avx512bw", "avx512vl",
};
/* CPUID[EAX=7,ECX=0].ECX feature names. */
static const char *cpuid_7_0_ecx_feature_name[] = {
    NULL, "avx512vbmi", "umip", "pku",
    "ospke", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, "rdpid", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[8000_0007].EDX (APM) feature names; only invtsc has a name. */
static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[EAX=0xd,ECX=1].EAX (XSAVE sub-feature) names. */
static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[6].EAX (thermal/power) feature names; only "arat" has a name. */
static const char *cpuid_6_feature_name[] = {
    NULL, NULL, "arat", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Feature-flag sets for the classic built-in CPU models, and the subset of
 * each feature word that TCG is able to emulate. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */
/* The 64-bit-only EXT2 bits are available to TCG only when building the
 * x86_64 target; the two unconditional, conflicting definitions here were
 * missing their TARGET_X86_64 guard, which this restores.
 */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif
/* TCG-supported bits of CPUID[8000_0001].EDX; the bits that alias base
 * CPUID[1].EDX features are taken from TCG_FEATURES. */
#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
/* TCG does not emulate SVM or KVM paravirtual features. */
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
/* TCG-supported bits of CPUID[EAX=7,ECX=0].EBX.  The closing
 * "CPUID_7_0_EBX_ERMS)" term and the "missing:" comment opener had been
 * lost, leaving the macro dangling into comment text; both are restored.
 */
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
/* TCG-supported bits of the remaining feature words. */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Description of one CPUID feature word: which CPUID leaf/register it is
 * read from, its user-visible bit names, and which bits TCG supports.
 * (The closing "} FeatureWordInfo;" had been lost and is restored.)
 */
typedef struct FeatureWordInfo {
    const char **feat_names;     /* Bit names, indexed by bit position */
    uint32_t cpuid_eax;          /* Input EAX for CPUID */
    bool cpuid_needs_ecx;        /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;          /* Input ECX value for CPUID */
    int cpuid_reg;               /* output register (R_* constant) */
    uint32_t tcg_features;       /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
/* Per-FeatureWord table: where each feature word is read from (CPUID
 * leaf/register) and which of its bits TCG supports.
 * NOTE(review): several designated-initializer labels (e.g. "[FEAT_1_EDX]
 * = {"), entry-closing "}," lines, ".cpuid_eax = 7"/".cpuid_reg = ..."
 * lines and the final "};" are missing from this copy -- restore from
 * upstream before building.
 */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    .feat_names = feature_name,
    .cpuid_eax = 1, .cpuid_reg = R_EDX,
    .tcg_features = TCG_FEATURES,
    .feat_names = ext_feature_name,
    .cpuid_eax = 1, .cpuid_reg = R_ECX,
    .tcg_features = TCG_EXT_FEATURES,
    [FEAT_8000_0001_EDX] = {
        .feat_names = ext2_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    [FEAT_8000_0001_ECX] = {
        .feat_names = ext3_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    [FEAT_C000_0001_EDX] = {
        .feat_names = ext4_feature_name,
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    .feat_names = kvm_feature_name,
    .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
    .tcg_features = TCG_KVM_FEATURES,
    [FEAT_HYPERV_EAX] = {
        .feat_names = hyperv_priv_feature_name,
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    [FEAT_HYPERV_EBX] = {
        .feat_names = hyperv_ident_feature_name,
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    [FEAT_HYPERV_EDX] = {
        .feat_names = hyperv_misc_feature_name,
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    .feat_names = svm_feature_name,
    .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
    .tcg_features = TCG_SVM_FEATURES,
    .feat_names = cpuid_7_0_ebx_feature_name,
    .cpuid_needs_ecx = true, .cpuid_ecx = 0,
    .tcg_features = TCG_7_0_EBX_FEATURES,
    .feat_names = cpuid_7_0_ecx_feature_name,
    .cpuid_needs_ecx = true, .cpuid_ecx = 0,
    .tcg_features = TCG_7_0_ECX_FEATURES,
    [FEAT_8000_0007_EDX] = {
        .feat_names = cpuid_apm_edx_feature_name,
        .cpuid_eax = 0x80000007,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    .feat_names = cpuid_xsave_feature_name,
    .cpuid_needs_ecx = true, .cpuid_ecx = 1,
    .tcg_features = TCG_XSAVE_FEATURES,
    .feat_names = cpuid_6_feature_name,
    .cpuid_eax = 6, .cpuid_reg = R_EAX,
    .tcg_features = TCG_6_EAX_FEATURES,
/* Mapping from 32-bit register index (R_* constant) to register name and
 * the corresponding QAPI enum value.
 * NOTE(review): the "const char *name;" member, the closing
 * "} X86RegisterInfo32;" and the REGISTER(...) rows of the table are
 * missing from this copy -- restore from upstream before building.
 */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
/* Table of XSAVE extended save areas: for each XSTATE component, the
 * feature word/bit that enables it and its offset/size inside
 * X86XSaveArea.
 * NOTE(review): the leading "[XSTATE_YMM_BIT] =" and "[XSTATE_PKRU_BIT] ="
 * designators and the closing "};" are missing from this copy -- restore
 * from upstream before building.
 */
const ExtSaveArea x86_ext_save_areas[] = {
    { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
      .offset = offsetof(X86XSaveArea, avx_state),
      .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
    { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
      .offset = offsetof(X86XSaveArea, bndreg_state),
      .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
    { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
      .offset = offsetof(X86XSaveArea, bndcsr_state),
      .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
    { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
      .offset = offsetof(X86XSaveArea, opmask_state),
      .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
    { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
      .offset = offsetof(X86XSaveArea, zmm_hi256_state),
      .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
    { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
      .offset = offsetof(X86XSaveArea, hi16_zmm_state),
      .size = sizeof(XSaveHi16_ZMM) },
    { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
      .offset = offsetof(X86XSaveArea, pkru_state),
      .size = sizeof(XSavePKRU) },
564 const char *get_register_name_32(unsigned int reg)
566 if (reg >= CPU_NB_REGS32) {
569 return x86_reg_info_32[reg].name;
573 * Returns the set of feature flags that are supported and migratable by
574 * QEMU, for a given FeatureWord.
576 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
578 FeatureWordInfo *wi = &feature_word_info[w];
582 for (i = 0; i < 32; i++) {
583 uint32_t f = 1U << i;
584 /* If the feature name is unknown, it is not supported by QEMU yet */
585 if (!wi->feat_names[i]) {
588 /* Skip features known to QEMU, but explicitly marked as unmigratable */
589 if (wi->unmigratable_flags & f) {
/* Execute the CPUID instruction on the host with @function/@count as
 * inputs and store the four output registers through the pointers.
 * NOTE(review): the function opening brace, the local "vec" buffer
 * declaration, the x86_64 #ifdef and asm-statement head, and the closing
 * popa/store/brace lines are missing from this copy -- restore from
 * upstream before building.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
    : "=a"(vec[0]), "=b"(vec[1]),
      "=c"(vec[2]), "=d"(vec[3])
    : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* i386: save/restore all registers around CPUID and spill the outputs
     * through the pointer in %esi, since EBX may be the PIC register. */
    asm volatile("pusha \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 : : "a"(function), "c"(count), "S"(vec)
/* "White" here means any non-NUL byte outside the printable ASCII range:
 * <= ' ' (space and control characters) or > '~'. */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))

/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
 * a substring. ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
/* NOTE(review): the function braces, loop construct and return statements
 * are missing from this copy -- restore from upstream before building. */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
    if (!*s1 || !*s2 || *s1 != *s2)
    if (s1 == e1 && s2 == e2)
/* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right. Return 0 for success,
 * non-zero otherwise.
 */
/* NOTE(review): local declarations, the loop body continuation and the
 * return statements are missing from this copy -- restore from upstream
 * before building. */
static int altcmp(const char *s, const char *e, const char *altstr)
    for (q = p = altstr; ; ) {
        while (*p && *p != '|')
        if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false
 */
/* NOTE(review): the function braces, the bit-setting body and the return
 * statements are missing from this copy -- restore from upstream before
 * building. */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
    for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
        if (*ppc && !altcmp(s, e, *ppc)) {
/* Set the bit named @flagname in the matching word of @words, scanning
 * every feature word's name table; reports an error through errp when no
 * feature word knows the name.
 * NOTE(review): the trailing "Error **errp)" parameter line, function
 * braces, the loop-variable declaration and the break/closing lines are
 * missing from this copy -- restore from upstream before building. */
static void add_flagname_to_bitmaps(const char *flagname,
                                    FeatureWordArray words,
    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        if (wi->feat_names &&
            lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
    if (w == FEATURE_WORDS) {
        error_setg(errp, "CPU feature %s not found", flagname);
/* CPU class name definitions: */

/* QOM type-name suffix for x86 CPU model classes, and a helper that
 * builds a full type name from a model-name string literal. */
#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
/* Look up the ObjectClass for CPU model @cpu_model; appears to
 * special-case a NULL @cpu_model -- the body of that branch is not
 * visible here.
 * NOTE(review): local declarations, the NULL-check body, the g_free of
 * typename and the return statements are missing from this copy --
 * restore from upstream before building. */
static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
    if (cpu_model == NULL) {
    typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
739 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
741 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
742 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
743 return g_strndup(class_name,
744 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
/* Static definition of one built-in CPU model (see builtin_x86_defs).
 * NOTE(review): several members visible in uses below (name, level,
 * xlevel, family/model/stepping, model_id, cache_info) are missing from
 * this copy of the struct -- restore from upstream before building. */
struct X86CPUDefinition {
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    FeatureWordArray features;
761 static X86CPUDefinition builtin_x86_defs[] = {
765 .vendor = CPUID_VENDOR_AMD,
769 .features[FEAT_1_EDX] =
771 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
773 .features[FEAT_1_ECX] =
774 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
775 .features[FEAT_8000_0001_EDX] =
776 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
777 .features[FEAT_8000_0001_ECX] =
778 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
779 .xlevel = 0x8000000A,
780 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
785 .vendor = CPUID_VENDOR_AMD,
789 /* Missing: CPUID_HT */
790 .features[FEAT_1_EDX] =
792 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
793 CPUID_PSE36 | CPUID_VME,
794 .features[FEAT_1_ECX] =
795 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
797 .features[FEAT_8000_0001_EDX] =
798 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
799 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
800 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
801 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
803 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
804 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
805 .features[FEAT_8000_0001_ECX] =
806 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
807 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
808 /* Missing: CPUID_SVM_LBRV */
809 .features[FEAT_SVM] =
811 .xlevel = 0x8000001A,
812 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
817 .vendor = CPUID_VENDOR_INTEL,
821 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
822 .features[FEAT_1_EDX] =
824 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
825 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
826 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
827 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
828 .features[FEAT_1_ECX] =
829 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
831 .features[FEAT_8000_0001_EDX] =
832 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
833 .features[FEAT_8000_0001_ECX] =
835 .xlevel = 0x80000008,
836 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
841 .vendor = CPUID_VENDOR_INTEL,
845 /* Missing: CPUID_HT */
846 .features[FEAT_1_EDX] =
847 PPRO_FEATURES | CPUID_VME |
848 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
850 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
851 .features[FEAT_1_ECX] =
852 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
853 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
854 .features[FEAT_8000_0001_EDX] =
855 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
856 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
857 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
858 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
859 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
860 .features[FEAT_8000_0001_ECX] =
862 .xlevel = 0x80000008,
863 .model_id = "Common KVM processor"
868 .vendor = CPUID_VENDOR_INTEL,
872 .features[FEAT_1_EDX] =
874 .features[FEAT_1_ECX] =
876 .xlevel = 0x80000004,
877 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
882 .vendor = CPUID_VENDOR_INTEL,
886 .features[FEAT_1_EDX] =
887 PPRO_FEATURES | CPUID_VME |
888 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
889 .features[FEAT_1_ECX] =
891 .features[FEAT_8000_0001_ECX] =
893 .xlevel = 0x80000008,
894 .model_id = "Common 32-bit KVM processor"
899 .vendor = CPUID_VENDOR_INTEL,
903 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
904 .features[FEAT_1_EDX] =
905 PPRO_FEATURES | CPUID_VME |
906 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
908 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
909 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
910 .features[FEAT_1_ECX] =
911 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
912 .features[FEAT_8000_0001_EDX] =
914 .xlevel = 0x80000008,
915 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
920 .vendor = CPUID_VENDOR_INTEL,
924 .features[FEAT_1_EDX] =
931 .vendor = CPUID_VENDOR_INTEL,
935 .features[FEAT_1_EDX] =
942 .vendor = CPUID_VENDOR_INTEL,
946 .features[FEAT_1_EDX] =
953 .vendor = CPUID_VENDOR_INTEL,
957 .features[FEAT_1_EDX] =
964 .vendor = CPUID_VENDOR_AMD,
968 .features[FEAT_1_EDX] =
969 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
971 .features[FEAT_8000_0001_EDX] =
972 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
973 .xlevel = 0x80000008,
974 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
979 .vendor = CPUID_VENDOR_INTEL,
983 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
984 .features[FEAT_1_EDX] =
986 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
987 CPUID_ACPI | CPUID_SS,
988 /* Some CPUs got no CPUID_SEP */
989 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
991 .features[FEAT_1_ECX] =
992 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
994 .features[FEAT_8000_0001_EDX] =
996 .features[FEAT_8000_0001_ECX] =
998 .xlevel = 0x80000008,
999 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1004 .vendor = CPUID_VENDOR_INTEL,
1008 .features[FEAT_1_EDX] =
1009 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1010 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1011 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1012 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1013 CPUID_DE | CPUID_FP87,
1014 .features[FEAT_1_ECX] =
1015 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1016 .features[FEAT_8000_0001_EDX] =
1017 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1018 .features[FEAT_8000_0001_ECX] =
1020 .xlevel = 0x80000008,
1021 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1026 .vendor = CPUID_VENDOR_INTEL,
1030 .features[FEAT_1_EDX] =
1031 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1032 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1033 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1034 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1035 CPUID_DE | CPUID_FP87,
1036 .features[FEAT_1_ECX] =
1037 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1039 .features[FEAT_8000_0001_EDX] =
1040 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1041 .features[FEAT_8000_0001_ECX] =
1043 .xlevel = 0x80000008,
1044 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1049 .vendor = CPUID_VENDOR_INTEL,
1053 .features[FEAT_1_EDX] =
1054 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1055 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1056 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1057 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1058 CPUID_DE | CPUID_FP87,
1059 .features[FEAT_1_ECX] =
1060 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1061 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1062 .features[FEAT_8000_0001_EDX] =
1063 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1064 .features[FEAT_8000_0001_ECX] =
1066 .xlevel = 0x80000008,
1067 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1072 .vendor = CPUID_VENDOR_INTEL,
1076 .features[FEAT_1_EDX] =
1077 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1078 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1079 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1080 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1081 CPUID_DE | CPUID_FP87,
1082 .features[FEAT_1_ECX] =
1083 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1084 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1085 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1086 .features[FEAT_8000_0001_EDX] =
1087 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1088 .features[FEAT_8000_0001_ECX] =
1090 .features[FEAT_6_EAX] =
1092 .xlevel = 0x80000008,
1093 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1096 .name = "SandyBridge",
1098 .vendor = CPUID_VENDOR_INTEL,
1102 .features[FEAT_1_EDX] =
1103 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1104 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1105 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1106 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1107 CPUID_DE | CPUID_FP87,
1108 .features[FEAT_1_ECX] =
1109 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1110 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1111 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1112 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1114 .features[FEAT_8000_0001_EDX] =
1115 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1117 .features[FEAT_8000_0001_ECX] =
1119 .features[FEAT_XSAVE] =
1120 CPUID_XSAVE_XSAVEOPT,
1121 .features[FEAT_6_EAX] =
1123 .xlevel = 0x80000008,
1124 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1127 .name = "IvyBridge",
1129 .vendor = CPUID_VENDOR_INTEL,
1133 .features[FEAT_1_EDX] =
1134 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1135 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1136 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1137 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1138 CPUID_DE | CPUID_FP87,
1139 .features[FEAT_1_ECX] =
1140 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1141 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1142 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1143 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1144 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1145 .features[FEAT_7_0_EBX] =
1146 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1148 .features[FEAT_8000_0001_EDX] =
1149 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1151 .features[FEAT_8000_0001_ECX] =
1153 .features[FEAT_XSAVE] =
1154 CPUID_XSAVE_XSAVEOPT,
1155 .features[FEAT_6_EAX] =
1157 .xlevel = 0x80000008,
1158 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1161 .name = "Haswell-noTSX",
1163 .vendor = CPUID_VENDOR_INTEL,
1167 .features[FEAT_1_EDX] =
1168 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1169 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1170 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1171 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1172 CPUID_DE | CPUID_FP87,
1173 .features[FEAT_1_ECX] =
1174 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1175 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1176 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1177 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1178 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1179 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1180 .features[FEAT_8000_0001_EDX] =
1181 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1183 .features[FEAT_8000_0001_ECX] =
1184 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1185 .features[FEAT_7_0_EBX] =
1186 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1187 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1188 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1189 .features[FEAT_XSAVE] =
1190 CPUID_XSAVE_XSAVEOPT,
1191 .features[FEAT_6_EAX] =
1193 .xlevel = 0x80000008,
1194 .model_id = "Intel Core Processor (Haswell, no TSX)",
1198 .vendor = CPUID_VENDOR_INTEL,
1202 .features[FEAT_1_EDX] =
1203 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1204 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1205 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1206 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1207 CPUID_DE | CPUID_FP87,
1208 .features[FEAT_1_ECX] =
1209 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1210 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1211 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1212 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1213 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1214 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1215 .features[FEAT_8000_0001_EDX] =
1216 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1218 .features[FEAT_8000_0001_ECX] =
1219 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1220 .features[FEAT_7_0_EBX] =
1221 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1222 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1223 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1225 .features[FEAT_XSAVE] =
1226 CPUID_XSAVE_XSAVEOPT,
1227 .features[FEAT_6_EAX] =
1229 .xlevel = 0x80000008,
1230 .model_id = "Intel Core Processor (Haswell)",
1233 .name = "Broadwell-noTSX",
1235 .vendor = CPUID_VENDOR_INTEL,
1239 .features[FEAT_1_EDX] =
1240 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1241 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1242 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1243 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1244 CPUID_DE | CPUID_FP87,
1245 .features[FEAT_1_ECX] =
1246 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1247 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1248 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1249 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1250 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1251 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1252 .features[FEAT_8000_0001_EDX] =
1253 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1255 .features[FEAT_8000_0001_ECX] =
1256 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1257 .features[FEAT_7_0_EBX] =
1258 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1259 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1260 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1261 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1263 .features[FEAT_XSAVE] =
1264 CPUID_XSAVE_XSAVEOPT,
1265 .features[FEAT_6_EAX] =
1267 .xlevel = 0x80000008,
1268 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1271 .name = "Broadwell",
1273 .vendor = CPUID_VENDOR_INTEL,
1277 .features[FEAT_1_EDX] =
1278 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1279 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1280 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1281 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1282 CPUID_DE | CPUID_FP87,
1283 .features[FEAT_1_ECX] =
1284 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1285 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1286 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1287 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1288 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1289 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1290 .features[FEAT_8000_0001_EDX] =
1291 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1293 .features[FEAT_8000_0001_ECX] =
1294 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1295 .features[FEAT_7_0_EBX] =
1296 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1297 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1298 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1299 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1301 .features[FEAT_XSAVE] =
1302 CPUID_XSAVE_XSAVEOPT,
1303 .features[FEAT_6_EAX] =
1305 .xlevel = 0x80000008,
1306 .model_id = "Intel Core Processor (Broadwell)",
1309 .name = "Skylake-Client",
1311 .vendor = CPUID_VENDOR_INTEL,
1315 .features[FEAT_1_EDX] =
1316 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1317 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1318 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1319 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1320 CPUID_DE | CPUID_FP87,
1321 .features[FEAT_1_ECX] =
1322 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1323 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1324 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1325 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1326 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1327 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1328 .features[FEAT_8000_0001_EDX] =
1329 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1331 .features[FEAT_8000_0001_ECX] =
1332 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1333 .features[FEAT_7_0_EBX] =
1334 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1335 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1336 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1337 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1338 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1339 /* Missing: XSAVES (not supported by some Linux versions,
1340 * including v4.1 to v4.6).
1341 * KVM doesn't yet expose any XSAVES state save component,
1342 * and the only one defined in Skylake (processor tracing)
1343 * probably will block migration anyway.
1345 .features[FEAT_XSAVE] =
1346 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1347 CPUID_XSAVE_XGETBV1,
1348 .features[FEAT_6_EAX] =
1350 .xlevel = 0x80000008,
1351 .model_id = "Intel Core Processor (Skylake)",
1354 .name = "Opteron_G1",
1356 .vendor = CPUID_VENDOR_AMD,
1360 .features[FEAT_1_EDX] =
1361 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1362 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1363 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1364 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1365 CPUID_DE | CPUID_FP87,
1366 .features[FEAT_1_ECX] =
1368 .features[FEAT_8000_0001_EDX] =
1369 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1370 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1371 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1372 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1373 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1374 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1375 .xlevel = 0x80000008,
1376 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1379 .name = "Opteron_G2",
1381 .vendor = CPUID_VENDOR_AMD,
1385 .features[FEAT_1_EDX] =
1386 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1387 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1388 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1389 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1390 CPUID_DE | CPUID_FP87,
1391 .features[FEAT_1_ECX] =
1392 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1393 /* Missing: CPUID_EXT2_RDTSCP */
1394 .features[FEAT_8000_0001_EDX] =
1395 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1396 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1397 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1398 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1399 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1400 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1401 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1402 .features[FEAT_8000_0001_ECX] =
1403 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1404 .xlevel = 0x80000008,
1405 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1408 .name = "Opteron_G3",
1410 .vendor = CPUID_VENDOR_AMD,
1414 .features[FEAT_1_EDX] =
1415 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1416 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1417 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1418 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1419 CPUID_DE | CPUID_FP87,
1420 .features[FEAT_1_ECX] =
1421 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1423 /* Missing: CPUID_EXT2_RDTSCP */
1424 .features[FEAT_8000_0001_EDX] =
1425 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1426 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1427 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1428 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1429 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1430 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1431 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1432 .features[FEAT_8000_0001_ECX] =
1433 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1434 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1435 .xlevel = 0x80000008,
1436 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1439 .name = "Opteron_G4",
1441 .vendor = CPUID_VENDOR_AMD,
1445 .features[FEAT_1_EDX] =
1446 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1447 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1448 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1449 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1450 CPUID_DE | CPUID_FP87,
1451 .features[FEAT_1_ECX] =
1452 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1453 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1454 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1456 /* Missing: CPUID_EXT2_RDTSCP */
1457 .features[FEAT_8000_0001_EDX] =
1459 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1460 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1461 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1462 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1463 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1464 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1465 .features[FEAT_8000_0001_ECX] =
1466 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1467 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1468 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1471 .xlevel = 0x8000001A,
1472 .model_id = "AMD Opteron 62xx class CPU",
1475 .name = "Opteron_G5",
1477 .vendor = CPUID_VENDOR_AMD,
1481 .features[FEAT_1_EDX] =
1482 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1483 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1484 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1485 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1486 CPUID_DE | CPUID_FP87,
1487 .features[FEAT_1_ECX] =
1488 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1489 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1490 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1491 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1492 /* Missing: CPUID_EXT2_RDTSCP */
1493 .features[FEAT_8000_0001_EDX] =
1495 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1496 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1497 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1498 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1499 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1500 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1501 .features[FEAT_8000_0001_ECX] =
1502 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1503 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1504 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1507 .xlevel = 0x8000001A,
1508 .model_id = "AMD Opteron 63xx class CPU",
/* (property name, value) pair; used for tables of qdev property defaults
 * that are applied to CPU objects (see x86_cpu_apply_props below). */
1512 typedef struct PropValue {
1513 const char *prop, *value;
1516 /* KVM-specific features that are automatically added/removed
1517 * from all CPU models when KVM is enabled.
/* NOTE(review): individual entries may be overridden at runtime via
 * x86_cpu_change_kvm_default() below. */
1519 static PropValue kvm_default_props[] = {
1520 { "kvmclock", "on" },
1521 { "kvm-nopiodelay", "on" },
1522 { "kvm-asyncpf", "on" },
1523 { "kvm-steal-time", "on" },
1524 { "kvm-pv-eoi", "on" },
1525 { "kvmclock-stable-bit", "on" },
/* MONITOR/MWAIT is disabled by default under KVM. */
1528 { "monitor", "off" },
/* Change the default value of property @prop in the kvm_default_props
 * table above.  The property must already exist in the table (see the
 * comment retained below); matching is by exact name via strcmp(). */
1533 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1536 for (pv = kvm_default_props; pv->prop; pv++) {
1537 if (!strcmp(pv->prop, prop)) {
1543 /* It is valid to call this function only for properties that
1544 * are already present in the kvm_default_props table.
/* Forward declaration: defined later in this file; returns the feature
 * bits in word @w that the current accelerator can actually support. */
1549 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1550 bool migratable_only);
/* Return true if KVM reports support for LMCE (local machine-check
 * exception), i.e. MCG_LMCE_P is set in the supported MCE capabilities. */
1554 static bool lmce_supported(void)
1558 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1562 return !!(mce_cap & MCG_LMCE_P);
/* Fill @str with the host CPU's brand string by issuing CPUID leaves
 * 0x80000002..0x80000004 (16 bytes per leaf, 48 bytes total).
 * NOTE(review): @str must have room for at least 48 bytes; whether the
 * caller expects NUL termination is not visible here — confirm. */
1565 static int cpu_x86_fill_model_id(char *str)
1567 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1570 for (i = 0; i < 3; i++) {
1571 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
/* Each leaf returns 16 bytes of the brand string in EAX/EBX/ECX/EDX. */
1572 memcpy(str + i * 16 + 0, &eax, 4);
1573 memcpy(str + i * 16 + 4, &ebx, 4);
1574 memcpy(str + i * 16 + 8, &ecx, 4);
1575 memcpy(str + i * 16 + 12, &edx, 4);
/* Backing definition for the "host" CPU model; filled in lazily by
 * host_x86_cpu_class_init() from the host's CPUID data. */
1580 static X86CPUDefinition host_cpudef;
/* qdev properties specific to the "host" CPU model. */
1582 static Property host_x86_cpu_properties[] = {
1583 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1584 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1585 DEFINE_PROP_END_OF_LIST()
1588 /* class_init for the "host" CPU model
1590 * This function may be called before KVM is initialized.
1592 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1594 DeviceClass *dc = DEVICE_CLASS(oc);
1595 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1596 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
/* The "host" model only makes sense with KVM. */
1598 xcc->kvm_required = true;
/* Leaf 0: vendor string lives in EBX/EDX/ECX (in that order). */
1600 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1601 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
/* Leaf 1 EAX: family = base (bits 11:8) + extended (bits 27:20);
 * model = base (bits 7:4) | extended (bits 19:16) << 4;
 * stepping = bits 3:0. */
1603 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1604 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1605 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1606 host_cpudef.stepping = eax & 0x0F;
/* NOTE(review): return value (error indicator?) is ignored here. */
1608 cpu_x86_fill_model_id(host_cpudef.model_id);
1610 xcc->cpu_def = &host_cpudef;
1612 /* level, xlevel, xlevel2, and the feature words are initialized on
1613 * instance_init, because they require KVM to be initialized.
1616 dc->props = host_x86_cpu_properties;
1617 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1618 dc->cannot_destroy_with_object_finalize_yet = true;
/* instance_init for the "host" CPU model: query KVM for the maximum
 * basic/extended/Centaur CPUID levels and enable host-dependent
 * properties (lmce, pmu). */
1621 static void host_x86_cpu_initfn(Object *obj)
1623 X86CPU *cpu = X86_CPU(obj);
1624 CPUX86State *env = &cpu->env;
1625 KVMState *s = kvm_state;
1627 /* We can't fill the features array here because we don't know yet if
1628 * "migratable" is true or false.
1630 cpu->host_features = true;
1632 /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
1633 if (kvm_enabled()) {
/* Maximum CPUID input values: basic, extended, Centaur ranges. */
1634 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1635 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1636 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1638 if (lmce_supported()) {
1639 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
/* "pmu" defaults to on for -cpu host. */
1643 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
/* QOM type registration info for the "host" CPU model. */
1646 static const TypeInfo host_x86_cpu_type_info = {
1647 .name = X86_CPU_TYPE_NAME("host"),
1648 .parent = TYPE_X86_CPU,
1649 .instance_init = host_x86_cpu_initfn,
1650 .class_init = host_x86_cpu_class_init,
/* Print a warning to stderr for every bit set in @mask that names a
 * feature in word @w requested by the user but not supported by the
 * accelerator (host/KVM or TCG). */
1655 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1657 FeatureWordInfo *f = &feature_word_info[w];
1660 for (i = 0; i < 32; ++i) {
1661 if ((1UL << i) & mask) {
1662 const char *reg = get_register_name_32(f->cpuid_reg);
/* Feature may be unnamed; print the name only when known. */
1664 fprintf(stderr, "warning: %s doesn't support requested feature: "
1665 "CPUID.%02XH:%s%s%s [bit %d]\n",
1666 kvm_enabled() ? "host" : "TCG",
1668 f->feat_names[i] ? "." : "",
1669 f->feat_names[i] ? f->feat_names[i] : "", i);
/* QOM getter for the "family" property: effective family is the base
 * family (CPUID[1].EAX bits 11:8) plus the extended family (bits 27:20). */
1674 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1675 const char *name, void *opaque,
1678 X86CPU *cpu = X86_CPU(obj);
1679 CPUX86State *env = &cpu->env;
1682 value = (env->cpuid_version >> 8) & 0xf;
1684 value += (env->cpuid_version >> 20) & 0xff;
1686 visit_type_int(v, name, &value, errp);
/* QOM setter for the "family" property.  Values > 0x0f are encoded with
 * base family = 0xf and the remainder in the extended-family field,
 * hence the maximum of 0xff + 0xf. */
1689 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1690 const char *name, void *opaque,
1693 X86CPU *cpu = X86_CPU(obj);
1694 CPUX86State *env = &cpu->env;
1695 const int64_t min = 0;
1696 const int64_t max = 0xff + 0xf;
1697 Error *local_err = NULL;
1700 visit_type_int(v, name, &value, &local_err);
1702 error_propagate(errp, local_err);
1705 if (value < min || value > max) {
1706 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1707 name ? name : "null", value, min, max);
/* Clear extended-family (27:20) and base-family (11:8) fields. */
1711 env->cpuid_version &= ~0xff00f00;
1713 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1715 env->cpuid_version |= value << 8;
/* QOM getter for the "model" property: low nibble from CPUID[1].EAX
 * bits 7:4, high nibble from the extended-model field (bits 19:16). */
1719 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1720 const char *name, void *opaque,
1723 X86CPU *cpu = X86_CPU(obj);
1724 CPUX86State *env = &cpu->env;
1727 value = (env->cpuid_version >> 4) & 0xf;
1728 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1729 visit_type_int(v, name, &value, errp);
/* QOM setter for the "model" property (0..0xff); splits the value into
 * the base-model and extended-model CPUID fields. */
1732 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1733 const char *name, void *opaque,
1736 X86CPU *cpu = X86_CPU(obj);
1737 CPUX86State *env = &cpu->env;
1738 const int64_t min = 0;
1739 const int64_t max = 0xff;
1740 Error *local_err = NULL;
1743 visit_type_int(v, name, &value, &local_err);
1745 error_propagate(errp, local_err);
1748 if (value < min || value > max) {
1749 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1750 name ? name : "null", value, min, max);
/* Clear extended-model (19:16) and base-model (7:4) fields. */
1754 env->cpuid_version &= ~0xf00f0;
1755 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
/* QOM getter for the "stepping" property (CPUID[1].EAX bits 3:0). */
1758 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1759 const char *name, void *opaque,
1762 X86CPU *cpu = X86_CPU(obj);
1763 CPUX86State *env = &cpu->env;
1766 value = env->cpuid_version & 0xf;
1767 visit_type_int(v, name, &value, errp);
/* QOM setter for the "stepping" property (valid range 0..0xf). */
1770 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1771 const char *name, void *opaque,
1774 X86CPU *cpu = X86_CPU(obj);
1775 CPUX86State *env = &cpu->env;
1776 const int64_t min = 0;
1777 const int64_t max = 0xf;
1778 Error *local_err = NULL;
1781 visit_type_int(v, name, &value, &local_err);
1783 error_propagate(errp, local_err);
1786 if (value < min || value > max) {
1787 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1788 name ? name : "null", value, min, max);
1792 env->cpuid_version &= ~0xf;
1793 env->cpuid_version |= value & 0xf;
/* QOM getter for the "vendor" property: returns the 12-character vendor
 * string reassembled from the three CPUID vendor words.  Caller frees. */
1796 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1798 X86CPU *cpu = X86_CPU(obj);
1799 CPUX86State *env = &cpu->env;
/* +1 for the terminating NUL. */
1802 value = g_malloc(CPUID_VENDOR_SZ + 1);
1803 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1804 env->cpuid_vendor3);
/* QOM setter for the "vendor" property: @value must be exactly
 * CPUID_VENDOR_SZ (12) characters; it is packed 4 bytes per vendor word,
 * least-significant byte first. */
1808 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1811 X86CPU *cpu = X86_CPU(obj);
1812 CPUX86State *env = &cpu->env;
1815 if (strlen(value) != CPUID_VENDOR_SZ) {
1816 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1820 env->cpuid_vendor1 = 0;
1821 env->cpuid_vendor2 = 0;
1822 env->cpuid_vendor3 = 0;
1823 for (i = 0; i < 4; i++) {
1824 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1825 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1826 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
/* QOM getter for the "model-id" property: unpack the 48-byte brand
 * string from the cpuid_model word array.  Caller frees. */
1830 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1832 X86CPU *cpu = X86_CPU(obj);
1833 CPUX86State *env = &cpu->env;
1837 value = g_malloc(48 + 1);
1838 for (i = 0; i < 48; i++) {
/* Byte i lives in word i/4, at byte position i%4. */
1839 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
/* QOM setter for the "model-id" property: pack up to 48 characters of
 * @model_id into cpuid_model, zero-filling the remainder.
 * NOTE(review): padding behavior for short strings (space vs NUL) is
 * decided by lines not visible here — confirm against full source. */
1845 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1848 X86CPU *cpu = X86_CPU(obj);
1849 CPUX86State *env = &cpu->env;
1852 if (model_id == NULL) {
1855 len = strlen(model_id);
1856 memset(env->cpuid_model, 0, 48);
1857 for (i = 0; i < 48; i++) {
1861 c = (uint8_t)model_id[i];
1863 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
/* QOM getter for "tsc-frequency": the property is expressed in Hz while
 * env.tsc_khz stores kHz, hence the *1000. */
1867 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1868 void *opaque, Error **errp)
1870 X86CPU *cpu = X86_CPU(obj);
1873 value = cpu->env.tsc_khz * 1000;
1874 visit_type_int(v, name, &value, errp);
/* QOM setter for "tsc-frequency" (Hz): stores kHz in both tsc_khz and
 * user_tsc_khz; sub-kHz precision is truncated by the /1000. */
1877 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1878 void *opaque, Error **errp)
1880 X86CPU *cpu = X86_CPU(obj);
1881 const int64_t min = 0;
1882 const int64_t max = INT64_MAX;
1883 Error *local_err = NULL;
1886 visit_type_int(v, name, &value, &local_err);
1888 error_propagate(errp, local_err);
1891 if (value < min || value > max) {
1892 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1893 name ? name : "null", value, min, max);
1897 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1900 /* Generic getter for "feature-words" and "filtered-features" properties */
/* @opaque points at a FeatureWordArray (uint32_t[FEATURE_WORDS]) inside
 * the CPU object; each word is emitted with its CPUID leaf coordinates.
 * The list nodes live on the stack — safe because the visitor fully
 * consumes the list before this function returns. */
1901 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1902 const char *name, void *opaque,
1905 uint32_t *array = (uint32_t *)opaque;
1907 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1908 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1909 X86CPUFeatureWordInfoList *list = NULL;
1911 for (w = 0; w < FEATURE_WORDS; w++) {
1912 FeatureWordInfo *wi = &feature_word_info[w];
1913 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1914 qwi->cpuid_input_eax = wi->cpuid_eax;
1915 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1916 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1917 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1918 qwi->features = array[w];
1920 /* List will be in reverse order, but order shouldn't matter */
1921 list_entries[w].next = list;
1922 list_entries[w].value = &word_infos[w];
1923 list = &list_entries[w];
1926 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
/* QOM getter for the "hv-spinlocks" property. */
1929 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1930 void *opaque, Error **errp)
1932 X86CPU *cpu = X86_CPU(obj);
1933 int64_t value = cpu->hyperv_spinlock_attempts;
1935 visit_type_int(v, name, &value, errp);
/* QOM setter for "hv-spinlocks": the Hyper-V spinlock retry count.
 * NOTE(review): the minimum of 0xFFF presumably comes from the Hyper-V
 * TLFS requirement on the retry value — confirm against the spec. */
1938 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1939 void *opaque, Error **errp)
1941 const int64_t min = 0xFFF;
1942 const int64_t max = UINT_MAX;
1943 X86CPU *cpu = X86_CPU(obj);
1947 visit_type_int(v, name, &value, &err);
1949 error_propagate(errp, err);
1953 if (value < min || value > max) {
1954 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1955 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1956 object_get_typename(obj), name ? name : "null",
1960 cpu->hyperv_spinlock_attempts = value;
/* qdev property type descriptor wiring the getters/setters above. */
1963 static PropertyInfo qdev_prop_spinlocks = {
1965 .get = x86_get_hv_spinlocks,
1966 .set = x86_set_hv_spinlocks,
1969 /* Convert all '_' in a feature string option name to '-', to make feature
1970 * name conform to QOM property naming rule, which uses '-' instead of '_'.
/* Mutates @s in place. */
1972 static inline void feat2prop(char *s)
1974 while ((s = strchr(s, '_'))) {
1979 /* Compatibily hack to maintain legacy +-feat semantic,
1980 * where +-feat overwrites any feature set by
1981 * feat=on|feat even if the later is parsed after +-feat
1982 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
/* Bitmaps of features explicitly enabled ("+feat") / disabled ("-feat")
 * via the legacy syntax; consulted elsewhere when features are applied. */
1984 static FeatureWordArray plus_features = { 0 };
1985 static FeatureWordArray minus_features = { 0 };
1987 /* Parse "+feature,-feature,feature=foo" CPU feature string
/* Registers each "feature=value" pair as a global qdev property default
 * for @typename; "+feat"/"-feat" go into plus_features/minus_features.
 * Runs only once per process (guarded by cpu_globals_initialized).
 * Note: strtok() mutates @features and is not thread-safe; acceptable
 * here because this runs once during startup option parsing. */
1989 static void x86_cpu_parse_featurestr(const char *typename, char *features,
1992 char *featurestr; /* Single 'key=value" string being parsed */
1993 Error *local_err = NULL;
1994 static bool cpu_globals_initialized;
1996 if (cpu_globals_initialized) {
1999 cpu_globals_initialized = true;
2005 for (featurestr = strtok(features, ",");
2006 featurestr && !local_err;
2007 featurestr = strtok(NULL, ",")) {
2009 const char *val = NULL;
2012 GlobalProperty *prop;
2014 /* Compatibility syntax: */
2015 if (featurestr[0] == '+') {
2016 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
2018 } else if (featurestr[0] == '-') {
2019 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
2023 eq = strchr(featurestr, '=');
/* Normalize '_' to '-' so the name matches QOM property naming. */
2031 feat2prop(featurestr);
/* Special case: "tsc-freq" accepts size suffixes and is translated to
 * the "tsc-frequency" property as a plain decimal number. */
2035 if (!strcmp(name, "tsc-freq")) {
2039 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
2040 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
2041 if (tsc_freq < 0 || *err) {
2042 error_setg(errp, "bad numerical value %s", val);
2045 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2047 name = "tsc-frequency";
/* Register the pair as a global property default for this CPU type;
 * ownership of the strdup'ed strings stays with the global list. */
2050 prop = g_new0(typeof(*prop), 1);
2051 prop->driver = typename;
2052 prop->property = g_strdup(name);
2053 prop->value = g_strdup(val);
2054 prop->errp = &error_fatal;
2055 qdev_prop_register_global(prop);
2059 error_propagate(errp, local_err);
2063 /* Print all cpuid feature names in featureset
/* @featureset is a 32-entry array of (possibly NULL) feature names;
 * names are printed space-separated on a single line. */
2065 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2070 for (bit = 0; bit < 32; bit++) {
2071 if (featureset[bit]) {
2072 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2078 /* generate CPU information. */
/* Lists every built-in CPU model plus "host", then all recognized
 * CPUID flag names, to @f (used by "-cpu help"). */
2079 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2081 X86CPUDefinition *def;
2085 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2086 def = &builtin_x86_defs[i];
2087 snprintf(buf, sizeof(buf), "%s", def->name);
2088 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2091 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2092 "KVM processor with all supported host features "
2093 "(only available in KVM mode)");
2096 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2097 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2098 FeatureWordInfo *fw = &feature_word_info[i];
2100 (*cpu_fprintf)(f, " ");
2101 listflags(f, cpu_fprintf, fw->feat_names);
2102 (*cpu_fprintf)(f, "\n");
/* QMP query-cpu-definitions: build a list of all built-in CPU model
 * names.  List is built by prepending, so it comes out in reverse
 * table order; caller owns and frees the returned list. */
2106 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2108 CpuDefinitionInfoList *cpu_list = NULL;
2109 X86CPUDefinition *def;
2112 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2113 CpuDefinitionInfoList *entry;
2114 CpuDefinitionInfo *info;
2116 def = &builtin_x86_defs[i];
2117 info = g_malloc0(sizeof(*info));
2118 info->name = g_strdup(def->name);
2120 entry = g_malloc0(sizeof(*entry));
2121 entry->value = info;
2122 entry->next = cpu_list;
/* Return the feature bits in word @w that the active accelerator (KVM
 * or TCG) can support; when @migratable_only is set, further mask to
 * features that are safe for migration. */
2129 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2130 bool migratable_only)
2132 FeatureWordInfo *wi = &feature_word_info[w];
2135 if (kvm_enabled()) {
2136 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2139 } else if (tcg_enabled()) {
2140 r = wi->tcg_features;
2144 if (migratable_only) {
2145 r &= x86_cpu_get_migratable_flags(w);
2151 * Filters CPU feature words based on host availability of each feature.
2153 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
/* Masks env->features[w] down to what the accelerator supports and
 * records the removed bits in cpu->filtered_features[w]; warns about
 * them when "check" or "enforce" was requested. */
2155 static int x86_cpu_filter_features(X86CPU *cpu)
2157 CPUX86State *env = &cpu->env;
2161 for (w = 0; w < FEATURE_WORDS; w++) {
2162 uint32_t host_feat =
2163 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2164 uint32_t requested_features = env->features[w];
2165 env->features[w] &= host_feat;
2166 cpu->filtered_features[w] = requested_features & ~env->features[w];
2167 if (cpu->filtered_features[w]) {
2168 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2169 report_unavailable_features(w, cpu->filtered_features[w]);
/* Apply a NULL-terminated PropValue table (e.g. kvm_default_props) to
 * @cpu by parsing each value through the QOM property system. */
2178 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2181 for (pv = props; pv->prop; pv++) {
2185 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2190 /* Load data from X86CPUDefinition
/* Copies @def's levels, version fields, model-id and feature words into
 * @cpu via QOM properties, then applies accelerator-specific tweaks
 * (KVM defaults, CPUID_EXT_HYPERVISOR, host vendor override). */
2192 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2194 CPUX86State *env = &cpu->env;
2196 char host_vendor[CPUID_VENDOR_SZ + 1];
2199 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2200 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2201 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2202 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2203 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2204 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2205 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2206 for (w = 0; w < FEATURE_WORDS; w++) {
2207 env->features[w] = def->features[w];
2210 /* Special cases not set in the X86CPUDefinition structs: */
2211 if (kvm_enabled()) {
/* x2apic needs the in-kernel irqchip; turn it off otherwise. */
2212 if (!kvm_irqchip_in_kernel()) {
2213 x86_cpu_change_kvm_default("x2apic", "off");
2216 x86_cpu_apply_props(cpu, kvm_default_props);
/* Guests always see the hypervisor bit under emulation. */
2219 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2221 /* sysenter isn't supported in compatibility mode on AMD,
2222 * syscall isn't supported in compatibility mode on Intel.
2223 * Normally we advertise the actual CPU vendor, but you can
2224 * override this using the 'vendor' property if you want to use
2225 * KVM's sysenter/syscall emulation in compatibility mode and
2226 * when doing cross vendor migration
2228 vendor = def->vendor;
2229 if (kvm_enabled()) {
2230 uint32_t ebx = 0, ecx = 0, edx = 0;
2231 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2232 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2233 vendor = host_vendor;
2236 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
/* Create and realize an X86CPU from a "-cpu"-style model string. */
2240 X86CPU *cpu_x86_init(const char *cpu_model)
2242 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
/* class_init shared by all built-in CPU model types: @data is the
 * X86CPUDefinition registered by x86_register_cpudef_type(). */
2245 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2247 X86CPUDefinition *cpudef = data;
2248 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2250 xcc->cpu_def = cpudef;
/* Register one QOM type per built-in X86CPUDefinition; the type name is
 * derived from the model name. */
2253 static void x86_register_cpudef_type(X86CPUDefinition *def)
2255 char *typename = x86_cpu_type_name(def->name);
2258 .parent = TYPE_X86_CPU,
2259 .class_init = x86_cpu_cpudef_class_init,
2267 #if !defined(CONFIG_USER_ONLY)
/* Remove the APIC feature bit from CPUID[1].EDX (used when no APIC is
 * present in the machine configuration). */
2269 void cpu_clear_apic_feature(CPUX86State *env)
2271 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2274 #endif /* !CONFIG_USER_ONLY */
/* Compute the guest-visible CPUID response for leaf 'index', sub-leaf
 * 'count', writing the four result registers through eax/ebx/ecx/edx.
 * Out-of-range leaves are clamped to the configured maximum basic,
 * extended (0x8000xxxx) or Centaur (0xC000xxxx) leaf first. */
2276 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2277 uint32_t *eax, uint32_t *ebx,
2278 uint32_t *ecx, uint32_t *edx)
2280 X86CPU *cpu = x86_env_get_cpu(env);
2281 CPUState *cs = CPU(cpu);
2283 /* test if maximum index reached */
2284 if (index & 0x80000000) {
2285 if (index > env->cpuid_xlevel) {
2286 if (env->cpuid_xlevel2 > 0) {
2287 /* Handle the Centaur's CPUID instruction. */
2288 if (index > env->cpuid_xlevel2) {
2289 index = env->cpuid_xlevel2;
2290 } else if (index < 0xC0000000) {
2291 index = env->cpuid_xlevel;
2294 /* Intel documentation states that invalid EAX input will
2295 * return the same information as EAX=cpuid_level
2296 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2298 index = env->cpuid_level;
2302 if (index > env->cpuid_level)
2303 index = env->cpuid_level;
/* Leaf 0: highest basic leaf in EAX, vendor string in EBX/EDX/ECX. */
2308 *eax = env->cpuid_level;
2309 *ebx = env->cpuid_vendor1;
2310 *edx = env->cpuid_vendor2;
2311 *ecx = env->cpuid_vendor3;
/* Leaf 1: version info, APIC ID / CLFLUSH size in EBX, feature flags. */
2314 *eax = env->cpuid_version;
2315 *ebx = (cpu->apic_id << 24) |
2316 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2317 *ecx = env->features[FEAT_1_ECX];
/* OSXSAVE mirrors CR4.OSXSAVE at read time; it is not a static feature. */
2318 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2319 *ecx |= CPUID_EXT_OSXSAVE;
2321 *edx = env->features[FEAT_1_EDX];
2322 if (cs->nr_cores * cs->nr_threads > 1) {
2323 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2328 /* cache info: needed for Pentium Pro compatibility */
2329 if (cpu->cache_info_passthrough) {
2330 host_cpuid(index, 0, eax, ebx, ecx, edx);
2333 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2336 *edx = (L1D_DESCRIPTOR << 16) | \
2337 (L1I_DESCRIPTOR << 8) | \
2341 /* cache info: needed for Core compatibility */
2342 if (cpu->cache_info_passthrough) {
2343 host_cpuid(index, count, eax, ebx, ecx, edx);
2344 *eax &= ~0xFC000000;
2348 case 0: /* L1 dcache info */
2349 *eax |= CPUID_4_TYPE_DCACHE | \
2350 CPUID_4_LEVEL(1) | \
2351 CPUID_4_SELF_INIT_LEVEL;
2352 *ebx = (L1D_LINE_SIZE - 1) | \
2353 ((L1D_PARTITIONS - 1) << 12) | \
2354 ((L1D_ASSOCIATIVITY - 1) << 22);
2355 *ecx = L1D_SETS - 1;
2356 *edx = CPUID_4_NO_INVD_SHARING;
2358 case 1: /* L1 icache info */
2359 *eax |= CPUID_4_TYPE_ICACHE | \
2360 CPUID_4_LEVEL(1) | \
2361 CPUID_4_SELF_INIT_LEVEL;
2362 *ebx = (L1I_LINE_SIZE - 1) | \
2363 ((L1I_PARTITIONS - 1) << 12) | \
2364 ((L1I_ASSOCIATIVITY - 1) << 22);
2365 *ecx = L1I_SETS - 1;
2366 *edx = CPUID_4_NO_INVD_SHARING;
2368 case 2: /* L2 cache info */
2369 *eax |= CPUID_4_TYPE_UNIFIED | \
2370 CPUID_4_LEVEL(2) | \
2371 CPUID_4_SELF_INIT_LEVEL;
/* EAX[25:14] = threads sharing this cache, minus one. */
2372 if (cs->nr_threads > 1) {
2373 *eax |= (cs->nr_threads - 1) << 14;
2375 *ebx = (L2_LINE_SIZE - 1) | \
2376 ((L2_PARTITIONS - 1) << 12) | \
2377 ((L2_ASSOCIATIVITY - 1) << 22);
2379 *edx = CPUID_4_NO_INVD_SHARING;
2381 default: /* end of info */
2390 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2391 if ((*eax & 31) && cs->nr_cores > 1) {
2392 *eax |= (cs->nr_cores - 1) << 26;
2396 /* mwait info: needed for Core compatibility */
2397 *eax = 0; /* Smallest monitor-line size in bytes */
2398 *ebx = 0; /* Largest monitor-line size in bytes */
2399 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2403 /* Thermal and Power Leaf */
2404 *eax = env->features[FEAT_6_EAX];
2410 /* Structured Extended Feature Flags Enumeration Leaf */
2412 *eax = 0; /* Maximum ECX value for sub-leaves */
2413 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2414 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
/* OSPKE mirrors CR4.PKE at read time, like OSXSAVE above. */
2415 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2416 *ecx |= CPUID_7_0_ECX_OSPKE;
2418 *edx = 0; /* Reserved */
2427 /* Direct Cache Access Information Leaf */
2428 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2434 /* Architectural Performance Monitoring Leaf */
/* PMU leaf is only meaningful under KVM; otherwise left as-is (zeroed). */
2435 if (kvm_enabled() && cpu->enable_pmu) {
2436 KVMState *s = cs->kvm_state;
2438 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2439 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2440 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2441 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2450 /* Extended Topology Enumeration Leaf */
2451 if (!cpu->enable_cpuid_0xb) {
2452 *eax = *ebx = *ecx = *edx = 0;
2456 *ecx = count & 0xff;
2457 *edx = cpu->apic_id;
/* Sub-leaf 0: SMT level; sub-leaf 1: core level; else invalid. */
2461 *eax = apicid_core_offset(smp_cores, smp_threads);
2463 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2466 *eax = apicid_pkg_offset(smp_cores, smp_threads);
2467 *ebx = smp_cores * smp_threads;
2468 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2473 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2476 assert(!(*eax & ~0x1f));
2477 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
2480 KVMState *s = cs->kvm_state;
2484 /* Processor Extended State */
2489 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
/* Under KVM, mask the advertised XSAVE components by host support. */
2492 if (kvm_enabled()) {
2493 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2495 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2502 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2503 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2504 if ((env->features[esa->feature] & esa->bits) == esa->bits
2505 && ((ena_mask >> i) & 1) != 0) {
2509 *edx |= 1u << (i - 32);
2511 *ecx = MAX(*ecx, esa->offset + esa->size);
2514 *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2516 } else if (count == 1) {
2517 *eax = env->features[FEAT_XSAVE];
2518 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2519 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2520 if ((env->features[esa->feature] & esa->bits) == esa->bits
2521 && ((ena_mask >> count) & 1) != 0) {
/* Leaf 0x80000000: highest extended leaf + vendor string. */
2529 *eax = env->cpuid_xlevel;
2530 *ebx = env->cpuid_vendor1;
2531 *edx = env->cpuid_vendor2;
2532 *ecx = env->cpuid_vendor3;
/* Leaf 0x80000001: extended version and feature flags. */
2535 *eax = env->cpuid_version;
2537 *ecx = env->features[FEAT_8000_0001_ECX];
2538 *edx = env->features[FEAT_8000_0001_EDX];
2540 /* The Linux kernel checks for the CMPLegacy bit and
2541 * discards multiple thread information if it is set.
2542 * So don't set it here for Intel to make Linux guests happy.
2544 if (cs->nr_cores * cs->nr_threads > 1) {
2545 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2546 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2547 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2548 *ecx |= 1 << 1; /* CmpLegacy bit */
/* Leaves 0x80000002..4: processor brand string, 16 bytes per leaf. */
2555 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2556 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2557 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2558 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2561 /* cache info (L1 cache) */
2562 if (cpu->cache_info_passthrough) {
2563 host_cpuid(index, 0, eax, ebx, ecx, edx);
2566 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2567 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2568 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2569 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2570 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2571 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2572 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2573 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2576 /* cache info (L2 cache) */
2577 if (cpu->cache_info_passthrough) {
2578 host_cpuid(index, 0, eax, ebx, ecx, edx);
2581 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2582 (L2_DTLB_2M_ENTRIES << 16) | \
2583 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2584 (L2_ITLB_2M_ENTRIES);
2585 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2586 (L2_DTLB_4K_ENTRIES << 16) | \
2587 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2588 (L2_ITLB_4K_ENTRIES);
2589 *ecx = (L2_SIZE_KB_AMD << 16) | \
2590 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2591 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
/* L3 size is reported in 512KB units (EDX[31:18]). */
2592 *edx = ((L3_SIZE_KB/512) << 18) | \
2593 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2594 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
/* Leaf 0x80000007: advanced power management feature flags. */
2600 *edx = env->features[FEAT_8000_0007_EDX];
2603 /* virtual & phys address size in low 2 bytes. */
2604 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2605 /* 64 bit processor, 48 bits virtual, configurable
2608 *eax = 0x00003000 + cpu->phys_bits;
2610 *eax = cpu->phys_bits;
2615 if (cs->nr_cores * cs->nr_threads > 1) {
2616 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
/* SVM leaf: only populated when SVM is advertised in 0x80000001.ECX. */
2620 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2621 *eax = 0x00000001; /* SVM Revision */
2622 *ebx = 0x00000010; /* nr of ASIDs */
2624 *edx = env->features[FEAT_SVM]; /* optional features */
/* Centaur leaf 0xC0000000: highest Centaur leaf. */
2633 *eax = env->cpuid_xlevel2;
2639 /* Support for VIA CPU's CPUID instruction */
2640 *eax = env->cpuid_version;
2643 *edx = env->features[FEAT_C000_0001_EDX];
2648 /* Reserved for the future, and now filled with zero */
2655 /* reserved values: zero */
2664 /* CPUClass::reset() */
/* Bring the vCPU to its architectural reset state: real mode, CS at the
 * reset vector, FPU/SSE defaults, cleared MTRRs/debug registers. CPUID
 * configuration (everything from cpuid_level onward) is preserved. */
2665 static void x86_cpu_reset(CPUState *s)
2667 X86CPU *cpu = X86_CPU(s);
2668 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2669 CPUX86State *env = &cpu->env;
2674 xcc->parent_reset(s);
/* Zero only up to (not including) cpuid_level: the CPUID-derived
 * configuration after that offset must survive reset. */
2676 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2680 env->old_exception = -1;
2682 /* init to reset state */
2684 env->hflags2 |= HF2_GIF_MASK;
/* CR0 reset value: ET set, CD/NW set, paging/protection off. */
2686 cpu_x86_update_cr0(env, 0x60000010);
2687 env->a20_mask = ~0x0;
2688 env->smbase = 0x30000;
2690 env->idt.limit = 0xffff;
2691 env->gdt.limit = 0xffff;
2692 env->ldt.limit = 0xffff;
2693 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2694 env->tr.limit = 0xffff;
2695 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
/* CS selector 0xf000 with base 0xffff0000: execution resumes at the
 * architectural reset vector (EIP set to 0xfff0 elsewhere). */
2697 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2698 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2699 DESC_R_MASK | DESC_A_MASK);
2700 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2701 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2703 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2704 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2706 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2707 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2709 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2710 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2712 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2713 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
/* EDX holds the CPU signature after reset, as firmware expects. */
2717 env->regs[R_EDX] = env->cpuid_version;
2722 for (i = 0; i < 8; i++) {
2725 cpu_set_fpuc(env, 0x37f);
2727 env->mxcsr = 0x1f80;
2728 /* All units are in INIT state. */
2731 env->pat = 0x0007040600070406ULL;
2732 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2734 memset(env->dr, 0, sizeof(env->dr));
2735 env->dr[6] = DR6_FIXED_1;
2736 env->dr[7] = DR7_FIXED_1;
2737 cpu_breakpoint_remove_all(s, BP_CPU);
2738 cpu_watchpoint_remove_all(s, BP_CPU);
2741 xcr0 = XSTATE_FP_MASK;
2743 #ifdef CONFIG_USER_ONLY
2744 /* Enable all the features for user-mode. */
2745 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2746 xcr0 |= XSTATE_SSE_MASK;
2748 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2749 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2750 if ((env->features[esa->feature] & esa->bits) == esa->bits) {
2755 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2756 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2758 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2759 cr4 |= CR4_FSGSBASE_MASK;
2764 cpu_x86_update_cr4(env, cr4);
2767 * SDM 11.11.5 requires:
2768 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2769 * - IA32_MTRR_PHYSMASKn.V = 0
2770 * All other bits are undefined. For simplification, zero it all.
2772 env->mtrr_deftype = 0;
2773 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2774 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2776 #if !defined(CONFIG_USER_ONLY)
2777 /* We hard-wire the BSP to the first CPU. */
2778 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
/* Application processors start halted, waiting for INIT/SIPI. */
2780 s->halted = !cpu_is_bsp(cpu);
2782 if (kvm_enabled()) {
2783 kvm_arch_reset_vcpu(cpu);
2788 #ifndef CONFIG_USER_ONLY
/* Return true if this CPU is the bootstrap processor, as reported by the
 * BSP bit in its APIC base MSR. */
2789 bool cpu_is_bsp(X86CPU *cpu)
2791 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2794 /* TODO: remove me, when reset over QOM tree is implemented */
/* Machine-reset callback: forwards a system reset to this CPU. */
2795 static void x86_cpu_machine_reset_cb(void *opaque)
2797 X86CPU *cpu = opaque;
2798 cpu_reset(CPU(cpu));
/* Initialize machine-check (MCE) state: on family >= 6 CPUs that
 * advertise both MCE and MCA, enable the default capability set, the
 * default number of banks, and LMCE if requested by the user. */
2802 static void mce_init(X86CPU *cpu)
2804 CPUX86State *cenv = &cpu->env;
2807 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2808 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2809 (CPUID_MCE | CPUID_MCA)) {
2810 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
2811 (cpu->enable_lmce ? MCG_LMCE_P : 0);
2812 cenv->mcg_ctl = ~(uint64_t)0;
/* Enable all error-reporting controls in every bank. */
2813 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2814 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2819 #ifndef CONFIG_USER_ONLY
/* Create this CPU's local APIC device, picking the in-kernel KVM or Xen
 * variant when those accelerators are active, and parent it under the
 * CPU object as "lapic". Realization happens later in
 * x86_cpu_apic_realize(). */
2820 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2822 APICCommonState *apic;
2823 const char *apic_type = "apic";
2825 if (kvm_apic_in_kernel()) {
2826 apic_type = "kvm-apic";
2827 } else if (xen_enabled()) {
2828 apic_type = "xen-apic";
2831 cpu->apic_state = DEVICE(object_new(apic_type));
2833 object_property_add_child(OBJECT(cpu), "lapic",
2834 OBJECT(cpu->apic_state), &error_abort);
/* Drop our reference; the child property now owns the APIC object. */
2835 object_unref(OBJECT(cpu->apic_state));
2837 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2838 /* TODO: convert to link<> */
2839 apic = APIC_COMMON(cpu->apic_state);
2841 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
/* Realize the CPU's APIC (if one was created) and map the APIC MMIO
 * region into system memory exactly once, shared by all CPUs. */
2844 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2846 APICCommonState *apic;
2847 static bool apic_mmio_map_once;
2849 if (cpu->apic_state == NULL) {
2852 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2855 /* Map APIC MMIO area */
2856 apic = APIC_COMMON(cpu->apic_state);
/* All local APICs alias the same MMIO window; map it only once. */
2857 if (!apic_mmio_map_once) {
2858 memory_region_add_subregion_overlap(get_system_memory(),
2860 MSR_IA32_APICBASE_BASE,
2863 apic_mmio_map_once = true;
/* Machine-init-done notifier: if the machine exposes /machine/smram,
 * alias it into this CPU's address space (disabled by default) above
 * normal memory, so SMM code can toggle SMRAM visibility per CPU. */
2867 static void x86_cpu_machine_done(Notifier *n, void *unused)
2869 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2870 MemoryRegion *smram =
2871 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2874 cpu->smram = g_new(MemoryRegion, 1);
2875 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2876 smram, 0, 1ull << 32);
2877 memory_region_set_enabled(cpu->smram, false);
/* Priority 1 places SMRAM above the normal memory alias (priority 0). */
2878 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
/* CONFIG_USER_ONLY variant: no APIC to realize in user-mode emulation. */
2882 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2887 /* Note: Only safe for use on x86(-64) hosts */
/* Query the host's physical address width via CPUID leaf 0x80000008;
 * fall back to 36 bits when the host doesn't implement that leaf. */
2888 static uint32_t x86_host_phys_bits(void)
2891 uint32_t host_phys_bits;
2893 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
2894 if (eax >= 0x80000008) {
2895 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
2896 /* Note: According to AMD doc 25481 rev 2.34 they have a field
2897 * at 23:16 that can specify a maximum physical address bits for
2898 * the guest that can override this value; but I've not seen
2899 * anything with that set.
2901 host_phys_bits = eax & 0xff;
2903 /* It's an odd 64 bit machine that doesn't have the leaf for
2904 * physical address bits; fall back to 36 that's most older
2907 host_phys_bits = 36;
2910 return host_phys_bits;
/* Vendor tests against the three CPUID vendor-string words. */
2913 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2914 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2915 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2916 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2917 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2918 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for X86CPU: validate configuration (KVM
 * requirement, APIC ID, phys-bits), apply feature-word adjustments,
 * create the APIC and per-CPU address spaces, then chain to the parent
 * realize. Errors are reported through errp. */
2919 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2921 CPUState *cs = CPU(dev);
2922 X86CPU *cpu = X86_CPU(dev);
2923 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2924 CPUX86State *env = &cpu->env;
2925 Error *local_err = NULL;
2926 static bool ht_warned;
2929 if (xcc->kvm_required && !kvm_enabled()) {
2930 char *name = x86_cpu_class_get_model_name(xcc);
2931 error_setg(&local_err, "CPU model '%s' requires KVM", name);
2936 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
2937 error_setg(errp, "apic-id property was not initialized properly");
2941 /*TODO: cpu->host_features incorrectly overwrites features
2942 * set using "feat=on|off". Once we fix this, we can convert
2943 * plus_features & minus_features to global properties
2944 * inside x86_cpu_parse_featurestr() too.
2946 if (cpu->host_features) {
2947 for (w = 0; w < FEATURE_WORDS; w++) {
2949 x86_cpu_get_supported_feature_word(w, cpu->migratable);
/* Apply explicit "+feat"/"-feat" requests on top of the model. */
2953 for (w = 0; w < FEATURE_WORDS; w++) {
2954 cpu->env.features[w] |= plus_features[w];
2955 cpu->env.features[w] &= ~minus_features[w];
/* Advertising CPUID[7] features requires cpuid_level >= 7. */
2958 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2959 env->cpuid_level = 7;
2962 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2963 error_setg(&local_err,
2965 "Host doesn't support requested features" :
2966 "TCG doesn't support requested features");
2970 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2973 if (IS_AMD_CPU(env)) {
2974 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2975 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2976 & CPUID_EXT2_AMD_ALIASES);
2979 /* For 64bit systems think about the number of physical bits to present.
2980 * ideally this should be the same as the host; anything other than matching
2981 * the host can cause incorrect guest behaviour.
2982 * QEMU used to pick the magic value of 40 bits that corresponds to
2983 * consumer AMD devices but nothing else.
2985 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2986 if (kvm_enabled()) {
2987 uint32_t host_phys_bits = x86_host_phys_bits();
2990 if (cpu->host_phys_bits) {
2991 /* The user asked for us to use the host physical bits */
2992 cpu->phys_bits = host_phys_bits;
2995 /* Print a warning if the user set it to a value that's not the
2998 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
3000 error_report("Warning: Host physical bits (%u)"
3001 " does not match phys-bits property (%u)",
3002 host_phys_bits, cpu->phys_bits);
3006 if (cpu->phys_bits &&
3007 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
3008 cpu->phys_bits < 32)) {
3009 error_setg(errp, "phys-bits should be between 32 and %u "
3011 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
/* TCG cannot emulate arbitrary physical-address widths. */
3015 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
3016 error_setg(errp, "TCG only supports phys-bits=%u",
3017 TCG_PHYS_ADDR_BITS);
3021 /* 0 means it was not explicitly set by the user (or by machine
3022 * compat_props or by the host code above). In this case, the default
3023 * is the value used by TCG (40).
3025 if (cpu->phys_bits == 0) {
3026 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
3029 /* For 32 bit systems don't use the user set value, but keep
3030 * phys_bits consistent with what we tell the guest.
3032 if (cpu->phys_bits != 0) {
3033 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
/* PSE36 extends 32-bit guests to 36-bit physical addresses. */
3037 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
3038 cpu->phys_bits = 36;
3040 cpu->phys_bits = 32;
3043 cpu_exec_init(cs, &error_abort);
3045 if (tcg_enabled()) {
3049 #ifndef CONFIG_USER_ONLY
3050 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
/* An APIC is needed when advertised in CPUID or on SMP configurations. */
3052 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
3053 x86_cpu_apic_create(cpu, &local_err);
3054 if (local_err != NULL) {
3062 #ifndef CONFIG_USER_ONLY
3063 if (tcg_enabled()) {
3064 AddressSpace *newas = g_new(AddressSpace, 1);
3066 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
3067 cpu->cpu_as_root = g_new(MemoryRegion, 1);
3069 /* Outer container... */
3070 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
3071 memory_region_set_enabled(cpu->cpu_as_root, true);
3073 /* ... with two regions inside: normal system memory with low
3076 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
3077 get_system_memory(), 0, ~0ull);
3078 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
3079 memory_region_set_enabled(cpu->cpu_as_mem, true);
3080 address_space_init(newas, cpu->cpu_as_root, "CPU");
3082 cpu_address_space_init(cs, newas, 0);
3084 /* ... SMRAM with higher priority, linked from /machine/smram. */
3085 cpu->machine_done.notify = x86_cpu_machine_done;
3086 qemu_add_machine_init_done_notifier(&cpu->machine_done);
3092 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3093 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3094 * based on inputs (sockets,cores,threads), it is still better to gives
3097 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
3098 * cs->nr_threads hasn't be populated yet and the checking is incorrect.
3100 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3101 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3102 " -smp options properly.");
3106 x86_cpu_apic_realize(cpu, &local_err);
3107 if (local_err != NULL) {
/* Chain to the parent (CPU base class) realize last. */
3112 xcc->parent_realize(dev, &local_err);
3115 if (local_err != NULL) {
3116 error_propagate(errp, local_err);
/* DeviceClass::unrealize: tear down what realize created — remove the
 * vCPU thread and reset hook (system emulation) and unparent the APIC. */
3121 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3123 X86CPU *cpu = X86_CPU(dev);
3125 #ifndef CONFIG_USER_ONLY
3126 cpu_remove_sync(CPU(dev));
3127 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3130 if (cpu->apic_state) {
3131 object_unparent(OBJECT(cpu->apic_state));
3132 cpu->apic_state = NULL;
/* Describes one bit (or set of bits) inside a feature word exposed as a
 * boolean QOM property; fields (ptr/mask) are used by the accessors below. */
3136 typedef struct BitProperty {
/* Property getter: true only when ALL bits in the mask are set. */
3141 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3142 void *opaque, Error **errp)
3144 BitProperty *fp = opaque;
3145 bool value = (*fp->ptr & fp->mask) == fp->mask;
3146 visit_type_bool(v, name, &value, errp);
/* Property setter: set or clear the masked bits in the feature word.
 * Refuses to change a realized device. */
3149 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3150 void *opaque, Error **errp)
3152 DeviceState *dev = DEVICE(obj);
3153 BitProperty *fp = opaque;
3154 Error *local_err = NULL;
3157 if (dev->realized) {
3158 qdev_prop_set_after_realize(dev, name, errp);
3162 visit_type_bool(v, name, &value, &local_err);
3164 error_propagate(errp, local_err);
3169 *fp->ptr |= fp->mask;
3171 *fp->ptr &= ~fp->mask;
/* Property release hook: free the BitProperty allocated at registration. */
3175 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3178 BitProperty *prop = opaque;
3182 /* Register a boolean property to get/set a single bit in a uint32_t field.
3184 * The same property name can be registered multiple times to make it affect
3185 * multiple bits in the same FeatureWord. In that case, the getter will return
3186 * true only if all bits are set.
3188 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3189 const char *prop_name,
3195 uint32_t mask = (1UL << bitnr);
/* If the property exists already, just OR the new bit into its mask. */
3197 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3200 assert(fp->ptr == field);
3203 fp = g_new0(BitProperty, 1);
3206 object_property_add(OBJECT(cpu), prop_name, "bool",
3207 x86_cpu_get_bit_prop,
3208 x86_cpu_set_bit_prop,
3209 x86_cpu_release_bit_prop, fp, &error_abort);
/* Register the named bit property for feature word 'w', bit 'bitnr'.
 * Names containing "|" register the first segment as the property and the
 * rest as aliases of it; bits with no name are skipped. */
3213 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3217 Object *obj = OBJECT(cpu);
3220 FeatureWordInfo *fi = &feature_word_info[w];
3222 if (!fi->feat_names) {
3225 if (!fi->feat_names[bitnr]) {
3229 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3231 feat2prop(names[0]);
3232 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3234 for (i = 1; names[i]; i++) {
3235 feat2prop(names[i]);
3236 object_property_add_alias(obj, names[i], obj, names[0],
/* QOM instance-init: register the per-CPU properties (family/model/
 * stepping, vendor, model-id, tsc-frequency, feature words, and one
 * boolean per named feature bit), then load the class's CPU model
 * definition into the instance. */
3243 static void x86_cpu_initfn(Object *obj)
3245 CPUState *cs = CPU(obj);
3246 X86CPU *cpu = X86_CPU(obj);
3247 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3248 CPUX86State *env = &cpu->env;
3253 object_property_add(obj, "family", "int",
3254 x86_cpuid_version_get_family,
3255 x86_cpuid_version_set_family, NULL, NULL, NULL);
3256 object_property_add(obj, "model", "int",
3257 x86_cpuid_version_get_model,
3258 x86_cpuid_version_set_model, NULL, NULL, NULL);
3259 object_property_add(obj, "stepping", "int",
3260 x86_cpuid_version_get_stepping,
3261 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3262 object_property_add_str(obj, "vendor",
3263 x86_cpuid_get_vendor,
3264 x86_cpuid_set_vendor, NULL);
3265 object_property_add_str(obj, "model-id",
3266 x86_cpuid_get_model_id,
3267 x86_cpuid_set_model_id, NULL);
3268 object_property_add(obj, "tsc-frequency", "int",
3269 x86_cpuid_get_tsc_freq,
3270 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
/* Read-only views of the active and host-filtered feature words. */
3271 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3272 x86_cpu_get_feature_words,
3273 NULL, NULL, (void *)env->features, NULL);
3274 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3275 x86_cpu_get_feature_words,
3276 NULL, NULL, (void *)cpu->filtered_features, NULL);
3278 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
/* One boolean property per named bit of every feature word. */
3280 for (w = 0; w < FEATURE_WORDS; w++) {
3283 for (bitnr = 0; bitnr < 32; bitnr++) {
3284 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3288 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
/* CPUClass::get_arch_id: the architectural CPU ID is the APIC ID. */
3291 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3293 X86CPU *cpu = X86_CPU(cs);
3295 return cpu->apic_id;
/* CPUClass::get_paging_enabled: paging is on when CR0.PG is set. */
3298 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3300 X86CPU *cpu = X86_CPU(cs);
3302 return cpu->env.cr[0] & CR0_PG_MASK;
/* CPUClass::set_pc: set the program counter (EIP) directly. */
3305 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3307 X86CPU *cpu = X86_CPU(cs);
3309 cpu->env.eip = value;
/* CPUClass::synchronize_from_tb: recover EIP from a translation block.
 * tb->pc is the linear address (CS base + EIP), so subtract cs_base. */
3312 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3314 X86CPU *cpu = X86_CPU(cs);
3316 cpu->env.eip = tb->pc - tb->cs_base;
/* CPUClass::has_work: true when a deliverable event is pending —
 * HARD/POLL interrupts gated by EFLAGS.IF, NMI/INIT/SIPI/MCE always,
 * and SMI unless the CPU is already in SMM. */
3319 static bool x86_cpu_has_work(CPUState *cs)
3321 X86CPU *cpu = X86_CPU(cs);
3322 CPUX86State *env = &cpu->env;
3324 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3325 CPU_INTERRUPT_POLL)) &&
3326 (env->eflags & IF_MASK)) ||
3327 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3328 CPU_INTERRUPT_INIT |
3329 CPU_INTERRUPT_SIPI |
3330 CPU_INTERRUPT_MCE)) ||
3331 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3332 !(env->hflags & HF_SMM_MASK));
/* qdev properties of X86CPU: topology IDs, Hyper-V enlightenments,
 * CPUID levels, and feature toggles. Defaults differ between *-user
 * (fixed IDs) and system emulation (unset, assigned by the machine). */
3335 static Property x86_cpu_properties[] = {
3336 #ifdef CONFIG_USER_ONLY
3337 /* apic_id = 0 by default for *-user, see commit 9886e834 */
3338 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
3339 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
3340 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
3341 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
3343 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
3344 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
3345 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
3346 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
3348 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3349 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3350 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3351 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3352 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3353 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3354 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3355 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3356 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3357 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3358 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3359 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3360 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3361 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3362 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
3363 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
3364 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
3365 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3366 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3367 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3368 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3369 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
3370 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
3371 DEFINE_PROP_END_OF_LIST()
/* Class-init for the TYPE_X86_CPU base class: hook realize/unrealize and
 * reset (saving the parents' implementations for chaining), install the
 * qdev properties, and fill in every CPUClass callback. */
3374 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3376 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3377 CPUClass *cc = CPU_CLASS(oc);
3378 DeviceClass *dc = DEVICE_CLASS(oc);
3380 xcc->parent_realize = dc->realize;
3381 dc->realize = x86_cpu_realizefn;
3382 dc->unrealize = x86_cpu_unrealizefn;
3383 dc->props = x86_cpu_properties;
3385 xcc->parent_reset = cc->reset;
3386 cc->reset = x86_cpu_reset;
3387 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3389 cc->class_by_name = x86_cpu_class_by_name;
3390 cc->parse_features = x86_cpu_parse_featurestr;
3391 cc->has_work = x86_cpu_has_work;
3392 cc->do_interrupt = x86_cpu_do_interrupt;
3393 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3394 cc->dump_state = x86_cpu_dump_state;
3395 cc->set_pc = x86_cpu_set_pc;
3396 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3397 cc->gdb_read_register = x86_cpu_gdb_read_register;
3398 cc->gdb_write_register = x86_cpu_gdb_write_register;
3399 cc->get_arch_id = x86_cpu_get_arch_id;
3400 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3401 #ifdef CONFIG_USER_ONLY
3402 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3404 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3405 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3406 cc->write_elf64_note = x86_cpu_write_elf64_note;
3407 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3408 cc->write_elf32_note = x86_cpu_write_elf32_note;
3409 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3410 cc->vmsd = &vmstate_x86_cpu;
3412 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3413 #ifndef CONFIG_USER_ONLY
3414 cc->debug_excp_handler = breakpoint_handler;
3416 cc->cpu_exec_enter = x86_cpu_exec_enter;
3417 cc->cpu_exec_exit = x86_cpu_exec_exit;
3419 dc->cannot_instantiate_with_device_add_yet = false;
3421 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3422 * object in cpus -> dangling pointer after final object_unref().
3424 dc->cannot_destroy_with_object_finalize_yet = true;
/* QOM TypeInfo for the abstract X86CPU base type; concrete CPU models
 * are registered as subclasses by x86_register_cpudef_type(). */
3427 static const TypeInfo x86_cpu_type_info = {
3428 .name = TYPE_X86_CPU,
3430 .instance_size = sizeof(X86CPU),
3431 .instance_init = x86_cpu_initfn,
3433 .class_size = sizeof(X86CPUClass),
3434 .class_init = x86_cpu_common_class_init,
/* Type-registration hook (run via type_init): register the base type,
 * every built-in model, and the "host" CPU type. */
3437 static void x86_cpu_register_types(void)
3441 type_register_static(&x86_cpu_type_info);
3442 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3443 x86_register_cpudef_type(&builtin_x86_defs[i]);
3446 type_register_static(&host_x86_cpu_type_info);
3450 type_init(x86_cpu_register_types)