/*
 *  i386 CPUID helper functions
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32 #include "qapi/qmp/qstring.h"
33 #include "qapi/qmp/qdict.h"
34 #include "qapi/qmp/qbool.h"
35 #include "qapi/qmp/qint.h"
36 #include "qapi/qmp/qfloat.h"
38 #include "qapi-types.h"
39 #include "qapi-visit.h"
40 #include "qapi/visitor.h"
41 #include "qom/qom-qobject.h"
42 #include "sysemu/arch_init.h"
44 #if defined(CONFIG_KVM)
#include <linux/kvm_para.h>
#endif
48 #include "sysemu/sysemu.h"
49 #include "hw/qdev-properties.h"
50 #include "hw/i386/topology.h"
51 #ifndef CONFIG_USER_ONLY
52 #include "exec/address-spaces.h"
54 #include "hw/xen/xen.h"
55 #include "hw/i386/apic_internal.h"
59 /* Cache topology CPUID constants: */
61 /* CPUID Leaf 2 Descriptors */
63 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
64 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
65 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
66 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
69 /* CPUID Leaf 4 constants: */
72 #define CPUID_4_TYPE_DCACHE 1
73 #define CPUID_4_TYPE_ICACHE 2
74 #define CPUID_4_TYPE_UNIFIED 3
76 #define CPUID_4_LEVEL(l) ((l) << 5)
78 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
79 #define CPUID_4_FULLY_ASSOC (1 << 9)
82 #define CPUID_4_NO_INVD_SHARING (1 << 0)
83 #define CPUID_4_INCLUSIVE (1 << 1)
84 #define CPUID_4_COMPLEX_IDX (1 << 2)
86 #define ASSOC_FULL 0xFF
88 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
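
/*
 * Illustration (assuming the standard AMD Fn8000_0006 associativity table
 * reproduced above): AMD_ENC_ASSOC(16) encodes to 0x8, AMD_ENC_ASSOC(ASSOC_FULL)
 * to 0xF, and any value not in the table encodes to 0 (invalid).
 */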
103 /* Definitions of the hardcoded cache entries we expose: */
106 #define L1D_LINE_SIZE 64
#define L1D_ASSOCIATIVITY 8
#define L1D_SETS 64
#define L1D_PARTITIONS 1
110 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
111 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
112 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
113 #define L1D_LINES_PER_TAG 1
114 #define L1D_SIZE_KB_AMD 64
115 #define L1D_ASSOCIATIVITY_AMD 2
117 /* L1 instruction cache: */
118 #define L1I_LINE_SIZE 64
#define L1I_ASSOCIATIVITY 8
#define L1I_SETS 64
#define L1I_PARTITIONS 1
122 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
123 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
124 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
125 #define L1I_LINES_PER_TAG 1
126 #define L1I_SIZE_KB_AMD 64
127 #define L1I_ASSOCIATIVITY_AMD 2
129 /* Level 2 unified cache: */
130 #define L2_LINE_SIZE 64
#define L2_ASSOCIATIVITY 16
#define L2_SETS 4096
#define L2_PARTITIONS 1
134 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
135 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
136 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
137 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
138 #define L2_LINES_PER_TAG 1
139 #define L2_SIZE_KB_AMD 512
141 /* Level 3 unified cache: */
142 #define L3_SIZE_KB 0 /* disabled */
143 #define L3_ASSOCIATIVITY 0 /* disabled */
144 #define L3_LINES_PER_TAG 0 /* disabled */
145 #define L3_LINE_SIZE 0 /* disabled */
146 #define L3_N_LINE_SIZE 64
147 #define L3_N_ASSOCIATIVITY 16
148 #define L3_N_SETS 16384
149 #define L3_N_PARTITIONS 1
150 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
151 #define L3_N_LINES_PER_TAG 1
152 #define L3_N_SIZE_KB_AMD 16384
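
/*
 * As a quick consistency check of the L3 geometry above:
 * 64-byte lines * 16 ways * 16384 sets * 1 partition = 16 MiB,
 * which matches both L3_N_SIZE_KB_AMD (16384 KiB) and the
 * CPUID_2_L3_16MB_16WAY_64B leaf-2 descriptor.
 */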
154 /* TLB definitions: */
156 #define L1_DTLB_2M_ASSOC 1
157 #define L1_DTLB_2M_ENTRIES 255
158 #define L1_DTLB_4K_ASSOC 1
159 #define L1_DTLB_4K_ENTRIES 255
161 #define L1_ITLB_2M_ASSOC 1
162 #define L1_ITLB_2M_ENTRIES 255
163 #define L1_ITLB_4K_ASSOC 1
164 #define L1_ITLB_4K_ENTRIES 255
166 #define L2_DTLB_2M_ASSOC 0 /* disabled */
167 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
168 #define L2_DTLB_4K_ASSOC 4
169 #define L2_DTLB_4K_ENTRIES 512
171 #define L2_ITLB_2M_ASSOC 0 /* disabled */
172 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
173 #define L2_ITLB_4K_ASSOC 4
174 #define L2_ITLB_4K_ENTRIES 512
static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;
    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}
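
/*
 * Example: with the CPUID.0 register values returned by an Intel CPU
 * (EBX = 0x756e6547 "Genu", EDX = 0x49656e69 "ineI", ECX = 0x6c65746e "ntel"),
 * x86_cpu_vendor_words2str(dst, ebx, edx, ecx) yields "GenuineIntel".
 */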
190 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
191 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
192 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
193 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
194 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
195 CPUID_PSE36 | CPUID_FXSR)
196 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
197 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
198 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
199 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
200 CPUID_PAE | CPUID_SEP | CPUID_APIC)
202 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
203 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
204 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
205 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
206 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
207 /* partly implemented:
208 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
211 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
212 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
213 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
214 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
215 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
218 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
219 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
220 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
221 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif
229 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
230 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
231 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
232 TCG_EXT2_X86_64_FEATURES)
233 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
234 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
235 #define TCG_EXT4_FEATURES 0
236 #define TCG_SVM_FEATURES 0
237 #define TCG_KVM_FEATURES 0
238 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
239 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
240 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
245 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
246 CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
          CPUID_7_0_ECX_LA57)
249 #define TCG_7_0_EDX_FEATURES 0
250 #define TCG_APM_FEATURES 0
251 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
252 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
256 typedef struct FeatureWordInfo {
257 /* feature flags names are taken from "Intel Processor Identification and
258 * the CPUID Instruction" and AMD's "CPUID Specification".
259 * In cases of disagreement between feature naming conventions,
 * aliases may be added.
 */
    const char *feat_names[32];
263 uint32_t cpuid_eax; /* Input EAX for CPUID */
264 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
265 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
266 int cpuid_reg; /* output register (R_* constant) */
267 uint32_t tcg_features; /* Feature flags supported by TCG */
268 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;
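
/*
 * Roughly, each FeatureWordInfo describes one 32-bit CPUID output register:
 * e.g. an entry with .cpuid_eax = 7, .cpuid_needs_ecx = true, .cpuid_ecx = 0
 * and .cpuid_reg = R_EBX corresponds to CPUID.(EAX=07H,ECX=0):EBX, with
 * feat_names[n] naming bit n of that register.
 */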
272 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
275 "fpu", "vme", "de", "pse",
276 "tsc", "msr", "pae", "mce",
277 "cx8", "apic", NULL, "sep",
278 "mtrr", "pge", "mca", "cmov",
279 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
280 NULL, "ds" /* Intel dts */, "acpi", "mmx",
281 "fxsr", "sse", "sse2", "ss",
282 "ht" /* Intel htt */, "tm", "ia64", "pbe",
284 .cpuid_eax = 1, .cpuid_reg = R_EDX,
285 .tcg_features = TCG_FEATURES,
289 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
290 "ds-cpl", "vmx", "smx", "est",
291 "tm2", "ssse3", "cid", NULL,
292 "fma", "cx16", "xtpr", "pdcm",
293 NULL, "pcid", "dca", "sse4.1",
294 "sse4.2", "x2apic", "movbe", "popcnt",
295 "tsc-deadline", "aes", "xsave", "osxsave",
296 "avx", "f16c", "rdrand", "hypervisor",
298 .cpuid_eax = 1, .cpuid_reg = R_ECX,
299 .tcg_features = TCG_EXT_FEATURES,
301 /* Feature names that are already defined on feature_name[] but
302 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
303 * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
306 [FEAT_8000_0001_EDX] = {
308 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
309 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
310 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
311 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
312 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
313 "nx", NULL, "mmxext", NULL /* mmx */,
314 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
315 NULL, "lm", "3dnowext", "3dnow",
317 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
318 .tcg_features = TCG_EXT2_FEATURES,
320 [FEAT_8000_0001_ECX] = {
322 "lahf-lm", "cmp-legacy", "svm", "extapic",
323 "cr8legacy", "abm", "sse4a", "misalignsse",
324 "3dnowprefetch", "osvw", "ibs", "xop",
325 "skinit", "wdt", NULL, "lwp",
326 "fma4", "tce", NULL, "nodeid-msr",
327 NULL, "tbm", "topoext", "perfctr-core",
328 "perfctr-nb", NULL, NULL, NULL,
329 NULL, NULL, NULL, NULL,
331 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
332 .tcg_features = TCG_EXT3_FEATURES,
334 [FEAT_C000_0001_EDX] = {
336 NULL, NULL, "xstore", "xstore-en",
337 NULL, NULL, "xcrypt", "xcrypt-en",
338 "ace2", "ace2-en", "phe", "phe-en",
339 "pmm", "pmm-en", NULL, NULL,
340 NULL, NULL, NULL, NULL,
341 NULL, NULL, NULL, NULL,
342 NULL, NULL, NULL, NULL,
343 NULL, NULL, NULL, NULL,
345 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
346 .tcg_features = TCG_EXT4_FEATURES,
350 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
351 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
352 NULL, NULL, NULL, NULL,
353 NULL, NULL, NULL, NULL,
354 NULL, NULL, NULL, NULL,
355 NULL, NULL, NULL, NULL,
356 "kvmclock-stable-bit", NULL, NULL, NULL,
357 NULL, NULL, NULL, NULL,
359 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
360 .tcg_features = TCG_KVM_FEATURES,
362 [FEAT_HYPERV_EAX] = {
364 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
365 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
366 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
367 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
368 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
369 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
370 NULL, NULL, NULL, NULL,
371 NULL, NULL, NULL, NULL,
372 NULL, NULL, NULL, NULL,
373 NULL, NULL, NULL, NULL,
374 NULL, NULL, NULL, NULL,
376 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
378 [FEAT_HYPERV_EBX] = {
380 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
381 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
382 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
383 NULL /* hv_create_port */, NULL /* hv_connect_port */,
384 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
385 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
387 NULL, NULL, NULL, NULL,
388 NULL, NULL, NULL, NULL,
389 NULL, NULL, NULL, NULL,
390 NULL, NULL, NULL, NULL,
392 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
394 [FEAT_HYPERV_EDX] = {
396 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
397 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
398 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
400 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
401 NULL, NULL, NULL, NULL,
402 NULL, NULL, NULL, NULL,
403 NULL, NULL, NULL, NULL,
404 NULL, NULL, NULL, NULL,
405 NULL, NULL, NULL, NULL,
407 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
411 "npt", "lbrv", "svm-lock", "nrip-save",
412 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
413 NULL, NULL, "pause-filter", NULL,
414 "pfthreshold", NULL, NULL, NULL,
415 NULL, NULL, NULL, NULL,
416 NULL, NULL, NULL, NULL,
417 NULL, NULL, NULL, NULL,
418 NULL, NULL, NULL, NULL,
420 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
421 .tcg_features = TCG_SVM_FEATURES,
425 "fsgsbase", "tsc-adjust", NULL, "bmi1",
426 "hle", "avx2", NULL, "smep",
427 "bmi2", "erms", "invpcid", "rtm",
428 NULL, NULL, "mpx", NULL,
429 "avx512f", "avx512dq", "rdseed", "adx",
430 "smap", "avx512ifma", "pcommit", "clflushopt",
431 "clwb", NULL, "avx512pf", "avx512er",
432 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
435 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
437 .tcg_features = TCG_7_0_EBX_FEATURES,
441 NULL, "avx512vbmi", "umip", "pku",
442 "ospke", NULL, NULL, NULL,
443 NULL, NULL, NULL, NULL,
444 NULL, NULL, "avx512-vpopcntdq", NULL,
445 "la57", NULL, NULL, NULL,
446 NULL, NULL, "rdpid", NULL,
447 NULL, NULL, NULL, NULL,
448 NULL, NULL, NULL, NULL,
451 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
453 .tcg_features = TCG_7_0_ECX_FEATURES,
457 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
458 NULL, NULL, NULL, NULL,
459 NULL, NULL, NULL, NULL,
460 NULL, NULL, NULL, NULL,
461 NULL, NULL, NULL, NULL,
462 NULL, NULL, NULL, NULL,
463 NULL, NULL, NULL, NULL,
464 NULL, NULL, NULL, NULL,
467 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
469 .tcg_features = TCG_7_0_EDX_FEATURES,
471 [FEAT_8000_0007_EDX] = {
473 NULL, NULL, NULL, NULL,
474 NULL, NULL, NULL, NULL,
475 "invtsc", NULL, NULL, NULL,
476 NULL, NULL, NULL, NULL,
477 NULL, NULL, NULL, NULL,
478 NULL, NULL, NULL, NULL,
479 NULL, NULL, NULL, NULL,
480 NULL, NULL, NULL, NULL,
482 .cpuid_eax = 0x80000007,
484 .tcg_features = TCG_APM_FEATURES,
485 .unmigratable_flags = CPUID_APM_INVTSC,
489 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
490 NULL, NULL, NULL, NULL,
491 NULL, NULL, NULL, NULL,
492 NULL, NULL, NULL, NULL,
493 NULL, NULL, NULL, NULL,
494 NULL, NULL, NULL, NULL,
495 NULL, NULL, NULL, NULL,
496 NULL, NULL, NULL, NULL,
499 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
501 .tcg_features = TCG_XSAVE_FEATURES,
505 NULL, NULL, "arat", NULL,
506 NULL, NULL, NULL, NULL,
507 NULL, NULL, NULL, NULL,
508 NULL, NULL, NULL, NULL,
509 NULL, NULL, NULL, NULL,
510 NULL, NULL, NULL, NULL,
511 NULL, NULL, NULL, NULL,
512 NULL, NULL, NULL, NULL,
514 .cpuid_eax = 6, .cpuid_reg = R_EAX,
515 .tcg_features = TCG_6_EAX_FEATURES,
517 [FEAT_XSAVE_COMP_LO] = {
519 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
522 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
523 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
524 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
527 [FEAT_XSAVE_COMP_HI] = {
529 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
535 typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
542 #define REGISTER(reg) \
543 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
556 typedef struct ExtSaveArea {
557 uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;
561 static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
        { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
          .offset = offsetof(X86XSaveArea, avx_state),
          .size = sizeof(XSaveAVX) },
580 [XSTATE_BNDREGS_BIT] =
581 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
582 .offset = offsetof(X86XSaveArea, bndreg_state),
583 .size = sizeof(XSaveBNDREG) },
584 [XSTATE_BNDCSR_BIT] =
585 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
586 .offset = offsetof(X86XSaveArea, bndcsr_state),
587 .size = sizeof(XSaveBNDCSR) },
588 [XSTATE_OPMASK_BIT] =
589 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
590 .offset = offsetof(X86XSaveArea, opmask_state),
591 .size = sizeof(XSaveOpmask) },
592 [XSTATE_ZMM_Hi256_BIT] =
593 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
594 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
595 .size = sizeof(XSaveZMM_Hi256) },
596 [XSTATE_Hi16_ZMM_BIT] =
597 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
598 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
599 .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
        { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
          .offset = offsetof(X86XSaveArea, pkru_state),
          .size = sizeof(XSavePKRU) },
};
static uint32_t xsave_area_size(uint64_t mask)
{
    uint64_t ret = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            ret = MAX(ret, esa->offset + esa->size);
        }
    }
    return ret;
}
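
/*
 * Example (a sketch, assuming the usual 512-byte legacy region followed by
 * the 64-byte XSAVE header): xsave_area_size(XSTATE_FP_MASK | XSTATE_SSE_MASK)
 * returns sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), since both
 * components live at offset 0 of the XSAVE area.
 */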
static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
    return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_COMP_LO];
}
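
/*
 * For example, FEAT_XSAVE_COMP_LO = 0x7 and FEAT_XSAVE_COMP_HI = 0 combine
 * into the 64-bit XCR0-style component mask 0x7 (x87 | SSE | AVX).
 */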
const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}
/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;
    int i;

    for (i = 0; i < 32; i++) {
        uint32_t f = 1U << i;

        /* If the feature name is known, it is implicitly considered migratable,
         * unless it is explicitly set in unmigratable_flags */
        if ((wi->migratable_flags & f) ||
            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
            r |= f;
        }
    }
    return r;
}
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax) { *eax = vec[0]; }
    if (ebx) { *ebx = vec[1]; }
    if (ecx) { *ecx = vec[2]; }
    if (edx) { *edx = vec[3]; }
}
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) { *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); }
    if (model) { *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); }
    if (stepping) { *stepping = eax & 0x0F; }
}
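
/*
 * Worked example: for CPUID.1 EAX = 0x000306C3 the fields decode to
 * family = 0x6 + 0x00 = 6, model = 0xC | (0x3 << 4) = 0x3C, stepping = 3.
 */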
710 /* CPU class name definitions: */
712 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
713 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
/* Return the QOM type name for a given CPU model name.
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;

    if (cpu_model == NULL) {
        return NULL;
    }

    typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}
static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}
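
/*
 * Example: for the class name "Haswell-x86_64-cpu" (TYPE_X86_CPU expands to
 * "x86_64-cpu" on 64-bit targets), this returns the model name "Haswell".
 */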
struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    char model_id[48];
};
759 static X86CPUDefinition builtin_x86_defs[] = {
763 .vendor = CPUID_VENDOR_AMD,
767 .features[FEAT_1_EDX] =
769 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
771 .features[FEAT_1_ECX] =
772 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
773 .features[FEAT_8000_0001_EDX] =
774 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
775 .features[FEAT_8000_0001_ECX] =
776 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
777 .xlevel = 0x8000000A,
778 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
783 .vendor = CPUID_VENDOR_AMD,
787 /* Missing: CPUID_HT */
788 .features[FEAT_1_EDX] =
790 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
791 CPUID_PSE36 | CPUID_VME,
792 .features[FEAT_1_ECX] =
793 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
795 .features[FEAT_8000_0001_EDX] =
796 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
797 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
798 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
799 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
801 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
802 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
803 .features[FEAT_8000_0001_ECX] =
804 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
805 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
806 /* Missing: CPUID_SVM_LBRV */
807 .features[FEAT_SVM] =
809 .xlevel = 0x8000001A,
810 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
815 .vendor = CPUID_VENDOR_INTEL,
819 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
820 .features[FEAT_1_EDX] =
822 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
823 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
824 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
825 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
826 .features[FEAT_1_ECX] =
827 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
829 .features[FEAT_8000_0001_EDX] =
830 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
831 .features[FEAT_8000_0001_ECX] =
833 .xlevel = 0x80000008,
834 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
839 .vendor = CPUID_VENDOR_INTEL,
843 /* Missing: CPUID_HT */
844 .features[FEAT_1_EDX] =
845 PPRO_FEATURES | CPUID_VME |
846 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
848 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
849 .features[FEAT_1_ECX] =
850 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
851 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
852 .features[FEAT_8000_0001_EDX] =
853 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
854 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
855 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
856 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
857 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
858 .features[FEAT_8000_0001_ECX] =
860 .xlevel = 0x80000008,
861 .model_id = "Common KVM processor"
866 .vendor = CPUID_VENDOR_INTEL,
870 .features[FEAT_1_EDX] =
872 .features[FEAT_1_ECX] =
874 .xlevel = 0x80000004,
875 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
880 .vendor = CPUID_VENDOR_INTEL,
884 .features[FEAT_1_EDX] =
885 PPRO_FEATURES | CPUID_VME |
886 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
887 .features[FEAT_1_ECX] =
889 .features[FEAT_8000_0001_ECX] =
891 .xlevel = 0x80000008,
892 .model_id = "Common 32-bit KVM processor"
897 .vendor = CPUID_VENDOR_INTEL,
901 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
902 .features[FEAT_1_EDX] =
903 PPRO_FEATURES | CPUID_VME |
904 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
906 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
907 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
908 .features[FEAT_1_ECX] =
909 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
910 .features[FEAT_8000_0001_EDX] =
912 .xlevel = 0x80000008,
913 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
918 .vendor = CPUID_VENDOR_INTEL,
922 .features[FEAT_1_EDX] =
929 .vendor = CPUID_VENDOR_INTEL,
933 .features[FEAT_1_EDX] =
940 .vendor = CPUID_VENDOR_INTEL,
944 .features[FEAT_1_EDX] =
951 .vendor = CPUID_VENDOR_INTEL,
955 .features[FEAT_1_EDX] =
962 .vendor = CPUID_VENDOR_AMD,
966 .features[FEAT_1_EDX] =
967 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
969 .features[FEAT_8000_0001_EDX] =
970 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
971 .xlevel = 0x80000008,
972 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
977 .vendor = CPUID_VENDOR_INTEL,
981 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
982 .features[FEAT_1_EDX] =
984 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
985 CPUID_ACPI | CPUID_SS,
986 /* Some CPUs got no CPUID_SEP */
987 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
989 .features[FEAT_1_ECX] =
990 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
992 .features[FEAT_8000_0001_EDX] =
994 .features[FEAT_8000_0001_ECX] =
996 .xlevel = 0x80000008,
997 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1002 .vendor = CPUID_VENDOR_INTEL,
1006 .features[FEAT_1_EDX] =
1007 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1008 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1009 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1010 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1011 CPUID_DE | CPUID_FP87,
1012 .features[FEAT_1_ECX] =
1013 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1014 .features[FEAT_8000_0001_EDX] =
1015 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1016 .features[FEAT_8000_0001_ECX] =
1018 .xlevel = 0x80000008,
1019 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1024 .vendor = CPUID_VENDOR_INTEL,
1028 .features[FEAT_1_EDX] =
1029 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1030 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1031 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1032 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1033 CPUID_DE | CPUID_FP87,
1034 .features[FEAT_1_ECX] =
1035 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1037 .features[FEAT_8000_0001_EDX] =
1038 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1039 .features[FEAT_8000_0001_ECX] =
1041 .xlevel = 0x80000008,
1042 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1047 .vendor = CPUID_VENDOR_INTEL,
1051 .features[FEAT_1_EDX] =
1052 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1053 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1054 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1055 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1056 CPUID_DE | CPUID_FP87,
1057 .features[FEAT_1_ECX] =
1058 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1059 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1060 .features[FEAT_8000_0001_EDX] =
1061 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1062 .features[FEAT_8000_0001_ECX] =
1064 .xlevel = 0x80000008,
1065 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1070 .vendor = CPUID_VENDOR_INTEL,
1074 .features[FEAT_1_EDX] =
1075 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1076 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1077 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1078 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1079 CPUID_DE | CPUID_FP87,
1080 .features[FEAT_1_ECX] =
1081 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1082 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1083 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1084 .features[FEAT_8000_0001_EDX] =
1085 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1086 .features[FEAT_8000_0001_ECX] =
1088 .features[FEAT_6_EAX] =
1090 .xlevel = 0x80000008,
1091 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1094 .name = "SandyBridge",
1096 .vendor = CPUID_VENDOR_INTEL,
1100 .features[FEAT_1_EDX] =
1101 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1102 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1103 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1104 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1105 CPUID_DE | CPUID_FP87,
1106 .features[FEAT_1_ECX] =
1107 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1108 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1109 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1110 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1112 .features[FEAT_8000_0001_EDX] =
1113 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1115 .features[FEAT_8000_0001_ECX] =
1117 .features[FEAT_XSAVE] =
1118 CPUID_XSAVE_XSAVEOPT,
1119 .features[FEAT_6_EAX] =
1121 .xlevel = 0x80000008,
1122 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1125 .name = "IvyBridge",
1127 .vendor = CPUID_VENDOR_INTEL,
1131 .features[FEAT_1_EDX] =
1132 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1133 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1134 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1135 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1136 CPUID_DE | CPUID_FP87,
1137 .features[FEAT_1_ECX] =
1138 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1139 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1140 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1141 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1142 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1143 .features[FEAT_7_0_EBX] =
1144 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1146 .features[FEAT_8000_0001_EDX] =
1147 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1149 .features[FEAT_8000_0001_ECX] =
1151 .features[FEAT_XSAVE] =
1152 CPUID_XSAVE_XSAVEOPT,
1153 .features[FEAT_6_EAX] =
1155 .xlevel = 0x80000008,
1156 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1159 .name = "Haswell-noTSX",
1161 .vendor = CPUID_VENDOR_INTEL,
1165 .features[FEAT_1_EDX] =
1166 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1167 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1168 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1169 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1170 CPUID_DE | CPUID_FP87,
1171 .features[FEAT_1_ECX] =
1172 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1173 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1174 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1175 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1176 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1177 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1178 .features[FEAT_8000_0001_EDX] =
1179 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1181 .features[FEAT_8000_0001_ECX] =
1182 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1183 .features[FEAT_7_0_EBX] =
1184 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1185 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1186 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1187 .features[FEAT_XSAVE] =
1188 CPUID_XSAVE_XSAVEOPT,
1189 .features[FEAT_6_EAX] =
1191 .xlevel = 0x80000008,
1192 .model_id = "Intel Core Processor (Haswell, no TSX)",
1196 .vendor = CPUID_VENDOR_INTEL,
1200 .features[FEAT_1_EDX] =
1201 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1202 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1203 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1204 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1205 CPUID_DE | CPUID_FP87,
1206 .features[FEAT_1_ECX] =
1207 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1208 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1209 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1210 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1211 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1212 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1213 .features[FEAT_8000_0001_EDX] =
1214 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1216 .features[FEAT_8000_0001_ECX] =
1217 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1218 .features[FEAT_7_0_EBX] =
1219 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1220 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1221 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1223 .features[FEAT_XSAVE] =
1224 CPUID_XSAVE_XSAVEOPT,
1225 .features[FEAT_6_EAX] =
1227 .xlevel = 0x80000008,
1228 .model_id = "Intel Core Processor (Haswell)",
1231 .name = "Broadwell-noTSX",
1233 .vendor = CPUID_VENDOR_INTEL,
1237 .features[FEAT_1_EDX] =
1238 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1239 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1240 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1241 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1242 CPUID_DE | CPUID_FP87,
1243 .features[FEAT_1_ECX] =
1244 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1245 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1246 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1247 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1248 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1249 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1250 .features[FEAT_8000_0001_EDX] =
1251 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1253 .features[FEAT_8000_0001_ECX] =
1254 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1255 .features[FEAT_7_0_EBX] =
1256 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1257 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1258 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1259 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1261 .features[FEAT_XSAVE] =
1262 CPUID_XSAVE_XSAVEOPT,
1263 .features[FEAT_6_EAX] =
1265 .xlevel = 0x80000008,
1266 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1269 .name = "Broadwell",
1271 .vendor = CPUID_VENDOR_INTEL,
1275 .features[FEAT_1_EDX] =
1276 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1277 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1278 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1279 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1280 CPUID_DE | CPUID_FP87,
1281 .features[FEAT_1_ECX] =
1282 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1283 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1284 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1285 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1286 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1287 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1288 .features[FEAT_8000_0001_EDX] =
1289 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1291 .features[FEAT_8000_0001_ECX] =
1292 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1293 .features[FEAT_7_0_EBX] =
1294 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1295 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1296 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1297 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1299 .features[FEAT_XSAVE] =
1300 CPUID_XSAVE_XSAVEOPT,
1301 .features[FEAT_6_EAX] =
1303 .xlevel = 0x80000008,
1304 .model_id = "Intel Core Processor (Broadwell)",
1307 .name = "Skylake-Client",
1309 .vendor = CPUID_VENDOR_INTEL,
1313 .features[FEAT_1_EDX] =
1314 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1315 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1316 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1317 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1318 CPUID_DE | CPUID_FP87,
1319 .features[FEAT_1_ECX] =
1320 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1321 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1322 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1323 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1324 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1325 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1326 .features[FEAT_8000_0001_EDX] =
1327 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1329 .features[FEAT_8000_0001_ECX] =
1330 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1331 .features[FEAT_7_0_EBX] =
1332 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1333 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1334 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1335 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1336 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1337 /* Missing: XSAVES (not supported by some Linux versions,
1338 * including v4.1 to v4.6).
1339 * KVM doesn't yet expose any XSAVES state save component,
1340 * and the only one defined in Skylake (processor tracing)
1341 * probably will block migration anyway.
1343 .features[FEAT_XSAVE] =
1344 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1345 CPUID_XSAVE_XGETBV1,
1346 .features[FEAT_6_EAX] =
1348 .xlevel = 0x80000008,
1349 .model_id = "Intel Core Processor (Skylake)",
1352 .name = "Opteron_G1",
1354 .vendor = CPUID_VENDOR_AMD,
1358 .features[FEAT_1_EDX] =
1359 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1360 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1361 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1362 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1363 CPUID_DE | CPUID_FP87,
1364 .features[FEAT_1_ECX] =
1366 .features[FEAT_8000_0001_EDX] =
1367 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1368 .xlevel = 0x80000008,
1369 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1372 .name = "Opteron_G2",
1374 .vendor = CPUID_VENDOR_AMD,
1378 .features[FEAT_1_EDX] =
1379 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1380 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1381 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1382 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1383 CPUID_DE | CPUID_FP87,
1384 .features[FEAT_1_ECX] =
1385 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1386 /* Missing: CPUID_EXT2_RDTSCP */
1387 .features[FEAT_8000_0001_EDX] =
1388 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1389 .features[FEAT_8000_0001_ECX] =
1390 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1391 .xlevel = 0x80000008,
1392 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1395 .name = "Opteron_G3",
1397 .vendor = CPUID_VENDOR_AMD,
1401 .features[FEAT_1_EDX] =
1402 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1403 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1404 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1405 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1406 CPUID_DE | CPUID_FP87,
1407 .features[FEAT_1_ECX] =
1408 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1410 /* Missing: CPUID_EXT2_RDTSCP */
1411 .features[FEAT_8000_0001_EDX] =
1412 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1413 .features[FEAT_8000_0001_ECX] =
1414 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1415 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1416 .xlevel = 0x80000008,
1417 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1420 .name = "Opteron_G4",
1422 .vendor = CPUID_VENDOR_AMD,
1426 .features[FEAT_1_EDX] =
1427 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1428 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1429 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1430 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1431 CPUID_DE | CPUID_FP87,
1432 .features[FEAT_1_ECX] =
1433 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1434 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1435 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1437 /* Missing: CPUID_EXT2_RDTSCP */
1438 .features[FEAT_8000_0001_EDX] =
1439 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1441 .features[FEAT_8000_0001_ECX] =
1442 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1443 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1444 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1447 .xlevel = 0x8000001A,
1448 .model_id = "AMD Opteron 62xx class CPU",
1451 .name = "Opteron_G5",
1453 .vendor = CPUID_VENDOR_AMD,
1457 .features[FEAT_1_EDX] =
1458 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1459 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1460 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1461 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1462 CPUID_DE | CPUID_FP87,
1463 .features[FEAT_1_ECX] =
1464 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1465 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1466 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1467 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1468 /* Missing: CPUID_EXT2_RDTSCP */
1469 .features[FEAT_8000_0001_EDX] =
1470 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1472 .features[FEAT_8000_0001_ECX] =
1473 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1474 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1475 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1478 .xlevel = 0x8000001A,
1479 .model_id = "AMD Opteron 63xx class CPU",
1483 typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1487 /* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 */
1490 static PropValue kvm_default_props[] = {
1491 { "kvmclock", "on" },
1492 { "kvm-nopiodelay", "on" },
1493 { "kvm-asyncpf", "on" },
1494 { "kvm-steal-time", "on" },
1495 { "kvm-pv-eoi", "on" },
1496 { "kvmclock-stable-bit", "on" },
1499 { "monitor", "off" },
/* TCG-specific defaults that override all CPU models when using TCG
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};
void x86_cpu_change_kvm_default(const char *prop, const char *value)
{
    PropValue *pv;
    for (pv = kvm_default_props; pv->prop; pv++) {
        if (!strcmp(pv->prop, prop)) {
            pv->value = value;
            break;
        }
    }

    /* It is valid to call this function only for properties that
     * are already present in the kvm_default_props table.
     */
    assert(pv->prop);
}
1528 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1529 bool migratable_only);
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif
    return !!(mce_cap & MCG_LMCE_P);
}
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
1559 static Property max_x86_cpu_properties[] = {
1560 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1561 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1565 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
1567 DeviceClass *dc = DEVICE_CLASS(oc);
1568 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1572 xcc->model_description =
1573 "Enables all features supported by the accelerator in the current host";
1575 dc->props = max_x86_cpu_properties;
1578 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
1580 static void max_x86_cpu_initfn(Object *obj)
1582 X86CPU *cpu = X86_CPU(obj);
1583 CPUX86State *env = &cpu->env;
1584 KVMState *s = kvm_state;
1586 /* We can't fill the features array here because we don't know yet if
1587 * "migratable" is true or false.
1589 cpu->max_features = true;
1591 if (kvm_enabled()) {
1592 X86CPUDefinition host_cpudef = { };
1593 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1595 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1596 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1598 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1599 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1600 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1601 host_cpudef.stepping = eax & 0x0F;
1603 cpu_x86_fill_model_id(host_cpudef.model_id);
1605 x86_cpu_load_def(cpu, &host_cpudef, &error_abort);
1607 env->cpuid_min_level =
1608 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1609 env->cpuid_min_xlevel =
1610 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1611 env->cpuid_min_xlevel2 =
1612 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1614 if (lmce_supported()) {
1615 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
1618 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
1619 "vendor", &error_abort);
1620 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
1621 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
1622 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
1623 object_property_set_str(OBJECT(cpu),
1624 "QEMU TCG CPU version " QEMU_HW_VERSION,
1625 "model-id", &error_abort);
1628 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1631 static const TypeInfo max_x86_cpu_type_info = {
1632 .name = X86_CPU_TYPE_NAME("max"),
1633 .parent = TYPE_X86_CPU,
1634 .instance_init = max_x86_cpu_initfn,
1635 .class_init = max_x86_cpu_class_init,
1640 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1642 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1644 xcc->kvm_required = true;
1647 xcc->model_description =
1648 "KVM processor with all supported host features "
1649 "(only available in KVM mode)";
1652 static const TypeInfo host_x86_cpu_type_info = {
1653 .name = X86_CPU_TYPE_NAME("host"),
1654 .parent = X86_CPU_TYPE_NAME("max"),
1655 .class_init = host_x86_cpu_class_init,
static void report_unavailable_features(FeatureWord w, uint32_t mask)
{
    FeatureWordInfo *f = &feature_word_info[w];
    int i;

    for (i = 0; i < 32; ++i) {
        if ((1UL << i) & mask) {
            const char *reg = get_register_name_32(f->cpuid_reg);
            assert(reg);
            fprintf(stderr, "warning: %s doesn't support requested feature: "
                    "CPUID.%02XH:%s%s%s [bit %d]\n",
                    kvm_enabled() ? "host" : "TCG",
                    f->cpuid_eax, reg,
                    f->feat_names[i] ? "." : "",
                    f->feat_names[i] ? f->feat_names[i] : "", i);
        }
    }
}
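
/*
 * An emitted warning looks roughly like:
 *   warning: host doesn't support requested feature: CPUID.01H:ECX.vmx [bit 5]
 */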
1679 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1680 const char *name, void *opaque,
1683 X86CPU *cpu = X86_CPU(obj);
1684 CPUX86State *env = &cpu->env;
1687 value = (env->cpuid_version >> 8) & 0xf;
1689 value += (env->cpuid_version >> 20) & 0xff;
1691 visit_type_int(v, name, &value, errp);
1694 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1695 const char *name, void *opaque,
1698 X86CPU *cpu = X86_CPU(obj);
1699 CPUX86State *env = &cpu->env;
1700 const int64_t min = 0;
1701 const int64_t max = 0xff + 0xf;
1702 Error *local_err = NULL;
1705 visit_type_int(v, name, &value, &local_err);
1707 error_propagate(errp, local_err);
1710 if (value < min || value > max) {
1711 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1712 name ? name : "null", value, min, max);
1716 env->cpuid_version &= ~0xff00f00;
1718 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1720 env->cpuid_version |= value << 8;
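
/*
 * Example: setting "family" to 21 (0x15) stores 0xF in the base family field
 * and 21 - 15 = 6 in the extended family field, i.e.
 * cpuid_version |= 0xf00 | (6 << 20); values up to 0x0f go straight into
 * bits 8..11.
 */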
1724 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1725 const char *name, void *opaque,
1728 X86CPU *cpu = X86_CPU(obj);
1729 CPUX86State *env = &cpu->env;
1732 value = (env->cpuid_version >> 4) & 0xf;
1733 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1734 visit_type_int(v, name, &value, errp);
1737 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1738 const char *name, void *opaque,
1741 X86CPU *cpu = X86_CPU(obj);
1742 CPUX86State *env = &cpu->env;
1743 const int64_t min = 0;
1744 const int64_t max = 0xff;
1745 Error *local_err = NULL;
1748 visit_type_int(v, name, &value, &local_err);
1750 error_propagate(errp, local_err);
1753 if (value < min || value > max) {
1754 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1755 name ? name : "null", value, min, max);
1759 env->cpuid_version &= ~0xf00f0;
1760 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1763 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1764 const char *name, void *opaque,
1767 X86CPU *cpu = X86_CPU(obj);
1768 CPUX86State *env = &cpu->env;
1771 value = env->cpuid_version & 0xf;
1772 visit_type_int(v, name, &value, errp);
1775 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1776 const char *name, void *opaque,
1779 X86CPU *cpu = X86_CPU(obj);
1780 CPUX86State *env = &cpu->env;
1781 const int64_t min = 0;
1782 const int64_t max = 0xf;
1783 Error *local_err = NULL;
1786 visit_type_int(v, name, &value, &local_err);
1788 error_propagate(errp, local_err);
1791 if (value < min || value > max) {
1792 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1793 name ? name : "null", value, min, max);
1797 env->cpuid_version &= ~0xf;
1798 env->cpuid_version |= value & 0xf;
1801 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1803 X86CPU *cpu = X86_CPU(obj);
1804 CPUX86State *env = &cpu->env;
1807 value = g_malloc(CPUID_VENDOR_SZ + 1);
1808 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1809 env->cpuid_vendor3);
1813 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1816 X86CPU *cpu = X86_CPU(obj);
1817 CPUX86State *env = &cpu->env;
1820 if (strlen(value) != CPUID_VENDOR_SZ) {
1821 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1825 env->cpuid_vendor1 = 0;
1826 env->cpuid_vendor2 = 0;
1827 env->cpuid_vendor3 = 0;
1828 for (i = 0; i < 4; i++) {
1829 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1830 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1831 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1835 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1837 X86CPU *cpu = X86_CPU(obj);
1838 CPUX86State *env = &cpu->env;
1842 value = g_malloc(48 + 1);
1843 for (i = 0; i < 48; i++) {
1844 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
                                   Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int c, len, i;

    if (model_id == NULL) {
        model_id = "";
    }
    len = strlen(model_id);
    memset(env->cpuid_model, 0, 48);
    for (i = 0; i < 48; i++) {
        if (i >= len) {
            c = '\0';
        } else {
            c = (uint8_t)model_id[i];
        }
        env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
    }
}
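
/*
 * Example of the packing: for a model_id starting with "QEMU", the first
 * word becomes cpuid_model[0] = 'Q' | 'E' << 8 | 'M' << 16 | 'U' << 24,
 * i.e. four ASCII characters per 32-bit CPUID register, little-endian.
 */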
1872 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1873 void *opaque, Error **errp)
1875 X86CPU *cpu = X86_CPU(obj);
1878 value = cpu->env.tsc_khz * 1000;
1879 visit_type_int(v, name, &value, errp);
1882 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1883 void *opaque, Error **errp)
1885 X86CPU *cpu = X86_CPU(obj);
1886 const int64_t min = 0;
1887 const int64_t max = INT64_MAX;
1888 Error *local_err = NULL;
1891 visit_type_int(v, name, &value, &local_err);
1893 error_propagate(errp, local_err);
1896 if (value < min || value > max) {
1897 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1898 name ? name : "null", value, min, max);
1902 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1905 /* Generic getter for "feature-words" and "filtered-features" properties */
1906 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1907 const char *name, void *opaque,
1910 uint32_t *array = (uint32_t *)opaque;
1912 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1913 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1914 X86CPUFeatureWordInfoList *list = NULL;
1916 for (w = 0; w < FEATURE_WORDS; w++) {
1917 FeatureWordInfo *wi = &feature_word_info[w];
1918 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1919 qwi->cpuid_input_eax = wi->cpuid_eax;
1920 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1921 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1922 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1923 qwi->features = array[w];
1925 /* List will be in reverse order, but order shouldn't matter */
1926 list_entries[w].next = list;
1927 list_entries[w].value = &word_infos[w];
1928 list = &list_entries[w];
1931 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
1934 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1935 void *opaque, Error **errp)
1937 X86CPU *cpu = X86_CPU(obj);
1938 int64_t value = cpu->hyperv_spinlock_attempts;
1940 visit_type_int(v, name, &value, errp);
1943 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1944 void *opaque, Error **errp)
1946 const int64_t min = 0xFFF;
1947 const int64_t max = UINT_MAX;
1948 X86CPU *cpu = X86_CPU(obj);
1952 visit_type_int(v, name, &value, &err);
1954 error_propagate(errp, err);
1958 if (value < min || value > max) {
1959 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1960 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1961 object_get_typename(obj), name ? name : "null",
1965 cpu->hyperv_spinlock_attempts = value;
1968 static PropertyInfo qdev_prop_spinlocks = {
1970 .get = x86_get_hv_spinlocks,
1971 .set = x86_set_hv_spinlocks,
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}
1984 /* Return the feature property name for a feature flag bit */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}
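
/*
 * Example: for w == FEAT_XSAVE_COMP_LO and bitnr == XSTATE_YMM_BIT, the
 * component maps back to FEAT_1_ECX/CPUID_EXT_AVX, so the reported name is
 * "avx" rather than an internal XSAVE component bit.
 */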
/* Compatibility hack to maintain legacy +feat/-feat semantics, where
 * +feat/-feat overwrites any feature set by feat=on|off, even if the
 * latter is parsed after +feat/-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
2010 static GList *plus_features, *minus_features;
static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}
/* Parse "+feature,-feature,feature=foo" CPU feature string
 */
2019 static void x86_cpu_parse_featurestr(const char *typename, char *features,
2022 char *featurestr; /* Single 'key=value" string being parsed */
2023 static bool cpu_globals_initialized;
2024 bool ambiguous = false;
2026 if (cpu_globals_initialized) {
2029 cpu_globals_initialized = true;
2035 for (featurestr = strtok(features, ",");
2037 featurestr = strtok(NULL, ",")) {
2039 const char *val = NULL;
2042 GlobalProperty *prop;
2044 /* Compatibility syntax: */
2045 if (featurestr[0] == '+') {
2046 plus_features = g_list_append(plus_features,
2047 g_strdup(featurestr + 1));
2049 } else if (featurestr[0] == '-') {
2050 minus_features = g_list_append(minus_features,
2051 g_strdup(featurestr + 1));
2055 eq = strchr(featurestr, '=');
2063 feat2prop(featurestr);
2066 if (g_list_find_custom(plus_features, name, compare_string)) {
2067 error_report("warning: Ambiguous CPU model string. "
2068 "Don't mix both \"+%s\" and \"%s=%s\"",
2072 if (g_list_find_custom(minus_features, name, compare_string)) {
2073 error_report("warning: Ambiguous CPU model string. "
2074 "Don't mix both \"-%s\" and \"%s=%s\"",
2080 if (!strcmp(name, "tsc-freq")) {
2084 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
2085 if (ret < 0 || tsc_freq > INT64_MAX) {
2086 error_setg(errp, "bad numerical value %s", val);
2089 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2091 name = "tsc-frequency";
2094 prop = g_new0(typeof(*prop), 1);
2095 prop->driver = typename;
2096 prop->property = g_strdup(name);
2097 prop->value = g_strdup(val);
2098 prop->errp = &error_fatal;
2099 qdev_prop_register_global(prop);
2103 error_report("warning: Compatibility of ambiguous CPU model "
2104 "strings won't be kept on future QEMU versions");
2108 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2109 static int x86_cpu_filter_features(X86CPU *cpu);
2111 /* Check for missing features that may prevent the CPU class from
2112 * running using the current machine and accelerator.
2114 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2115 strList **missing_feats)
2120 strList **next = missing_feats;
2122 if (xcc->kvm_required && !kvm_enabled()) {
2123 strList *new = g_new0(strList, 1);
2124 new->value = g_strdup("kvm");
2125 *missing_feats = new;
2129 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2131 x86_cpu_expand_features(xc, &err);
2133 /* Errors at x86_cpu_expand_features should never happen,
2134 * but in case it does, just report the model as not
2135 * runnable at all using the "type" property.
2137 strList *new = g_new0(strList, 1);
2138 new->value = g_strdup("type");
2143 x86_cpu_filter_features(xc);
2145 for (w = 0; w < FEATURE_WORDS; w++) {
2146 uint32_t filtered = xc->filtered_features[w];
2148 for (i = 0; i < 32; i++) {
2149 if (filtered & (1UL << i)) {
2150 strList *new = g_new0(strList, 1);
2151 new->value = g_strdup(x86_cpu_feature_name(w, i));
2158 object_unref(OBJECT(xc));
2161 /* Print all cpuid feature names in featureset
2163 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2168 for (bit = 0; bit < 32; bit++) {
2169 if (featureset[bit]) {
2170 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2176 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2177 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2179 ObjectClass *class_a = (ObjectClass *)a;
2180 ObjectClass *class_b = (ObjectClass *)b;
2181 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2182 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2183 const char *name_a, *name_b;
2185 if (cc_a->ordering != cc_b->ordering) {
2186 return cc_a->ordering - cc_b->ordering;
2188 name_a = object_class_get_name(class_a);
2189 name_b = object_class_get_name(class_b);
2190 return strcmp(name_a, name_b);
2194 static GSList *get_sorted_cpu_model_list(void)
2196 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2197 list = g_slist_sort(list, x86_cpu_list_compare);
2201 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2203 ObjectClass *oc = data;
2204 X86CPUClass *cc = X86_CPU_CLASS(oc);
2205 CPUListState *s = user_data;
2206 char *name = x86_cpu_class_get_model_name(cc);
2207 const char *desc = cc->model_description;
2208 if (!desc && cc->cpu_def) {
2209 desc = cc->cpu_def->model_id;
2212 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2217 /* list available CPU models and flags */
2218 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2223 .cpu_fprintf = cpu_fprintf,
2227 (*cpu_fprintf)(f, "Available CPUs:\n");
2228 list = get_sorted_cpu_model_list();
2229 g_slist_foreach(list, x86_cpu_list_entry, &s);
2232 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2233 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2234 FeatureWordInfo *fw = &feature_word_info[i];
2236 (*cpu_fprintf)(f, " ");
2237 listflags(f, cpu_fprintf, fw->feat_names);
2238 (*cpu_fprintf)(f, "\n");
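/* The resulting "-cpu help" output is shaped roughly like the sketch below
 * (model names and descriptions depend on the build):
 *
 *   Available CPUs:
 *   x86           qemu64  QEMU Virtual CPU version ...
 *   x86          Haswell  Intel Core Processor (Haswell)
 *
 *   Recognized CPUID flags:
 *     fpu vme de pse tsc msr pae mce cx8 apic ...
 */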
2242 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2244 ObjectClass *oc = data;
2245 X86CPUClass *cc = X86_CPU_CLASS(oc);
2246 CpuDefinitionInfoList **cpu_list = user_data;
2247 CpuDefinitionInfoList *entry;
2248 CpuDefinitionInfo *info;
2250 info = g_malloc0(sizeof(*info));
2251 info->name = x86_cpu_class_get_model_name(cc);
2252 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2253 info->has_unavailable_features = true;
2254 info->q_typename = g_strdup(object_class_get_name(oc));
2255 info->migration_safe = cc->migration_safe;
2256 info->has_migration_safe = true;
2257 info->q_static = cc->static_model;
2259 entry = g_malloc0(sizeof(*entry));
2260 entry->value = info;
2261 entry->next = *cpu_list;
2265 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2267 CpuDefinitionInfoList *cpu_list = NULL;
2268 GSList *list = get_sorted_cpu_model_list();
2269 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
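/* Rough shape of the QMP exchange backed by this function (the field values
 * shown are only examples):
 *
 * -> { "execute": "query-cpu-definitions" }
 * <- { "return": [
 *        { "name": "Haswell", "typename": "Haswell-x86_64-cpu",
 *          "unavailable-features": [], "static": false,
 *          "migration-safe": true },
 *        ... ] }
 */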
2274 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2275 bool migratable_only)
2277 FeatureWordInfo *wi = &feature_word_info[w];
2280 if (kvm_enabled()) {
2281 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2284 } else if (tcg_enabled()) {
2285 r = wi->tcg_features;
2289 if (migratable_only) {
2290 r &= x86_cpu_get_migratable_flags(w);
2295 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2299 for (w = 0; w < FEATURE_WORDS; w++) {
2300 report_unavailable_features(w, cpu->filtered_features[w]);
2304 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2307 for (pv = props; pv->prop; pv++) {
2311 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2316 /* Load data from X86CPUDefinition into a X86CPU object
2318 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2320 CPUX86State *env = &cpu->env;
2322 char host_vendor[CPUID_VENDOR_SZ + 1];
2325 /*NOTE: any property set by this function should be returned by
2326 * x86_cpu_static_props(), so static expansion of
2327 * query-cpu-model-expansion is always complete.
2330 /* CPU models only set _minimum_ values for level/xlevel: */
2331 object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
2332 object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2334 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2335 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2336 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2337 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2338 for (w = 0; w < FEATURE_WORDS; w++) {
2339 env->features[w] = def->features[w];
2342 /* Special cases not set in the X86CPUDefinition structs: */
2343 if (kvm_enabled()) {
2344 if (!kvm_irqchip_in_kernel()) {
2345 x86_cpu_change_kvm_default("x2apic", "off");
2348 x86_cpu_apply_props(cpu, kvm_default_props);
2349 } else if (tcg_enabled()) {
2350 x86_cpu_apply_props(cpu, tcg_default_props);
2353 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2355 /* sysenter isn't supported in compatibility mode on AMD,
2356 * syscall isn't supported in compatibility mode on Intel.
2357 * Normally we advertise the actual CPU vendor, but you can
2358 * override this using the 'vendor' property if you want to use
2359 * KVM's sysenter/syscall emulation in compatibility mode and
2360 * when doing cross vendor migration
2362 vendor = def->vendor;
2363 if (kvm_enabled()) {
2364 uint32_t ebx = 0, ecx = 0, edx = 0;
2365 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2366 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2367 vendor = host_vendor;
2370 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2374 /* Return a QDict containing keys for all properties that can be included
2375 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
2376 * must be included in the dictionary.
2378 static QDict *x86_cpu_static_props(void)
2382 static const char *props[] = {
2400 for (i = 0; props[i]; i++) {
2401 qdict_put_obj(d, props[i], qnull());
2404 for (w = 0; w < FEATURE_WORDS; w++) {
2405 FeatureWordInfo *fi = &feature_word_info[w];
2407 for (bit = 0; bit < 32; bit++) {
2408 if (!fi->feat_names[bit]) {
2411 qdict_put_obj(d, fi->feat_names[bit], qnull());
2418 /* Add an entry to @props dict, with the value for property. */
2419 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
2421 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
2424 qdict_put_obj(props, prop, value);
2427 /* Convert CPU model data from X86CPU object to a property dictionary
2428 * that can recreate exactly the same CPU model.
2430 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
2432 QDict *sprops = x86_cpu_static_props();
2433 const QDictEntry *e;
2435 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
2436 const char *prop = qdict_entry_key(e);
2437 x86_cpu_expand_prop(cpu, props, prop);
2441 /* Convert CPU model data from X86CPU object to a property dictionary
2442 * that can recreate exactly the same CPU model, including every
2443 * writeable QOM property.
2445 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
2447 ObjectPropertyIterator iter;
2448 ObjectProperty *prop;
2450 object_property_iter_init(&iter, OBJECT(cpu));
2451 while ((prop = object_property_iter_next(&iter))) {
2452 /* skip read-only or write-only properties */
2453 if (!prop->get || !prop->set) {
2457 /* "hotplugged" is the only property that is configurable
2458 * on the command-line but will be set differently on CPUs
2459 * created using "-cpu ... -smp ..." and by CPUs created
2460 * on the fly by x86_cpu_from_model() for querying. Skip it.
2462 if (!strcmp(prop->name, "hotplugged")) {
2465 x86_cpu_expand_prop(cpu, props, prop->name);
2469 static void object_apply_props(Object *obj, QDict *props, Error **errp)
2471 const QDictEntry *prop;
2474 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
2475 object_property_set_qobject(obj, qdict_entry_value(prop),
2476 qdict_entry_key(prop), &err);
2482 error_propagate(errp, err);
2485 /* Create X86CPU object according to model+props specification */
2486 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
2492 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
2494 error_setg(&err, "CPU model '%s' not found", model);
2498 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2500 object_apply_props(OBJECT(xc), props, &err);
2506 x86_cpu_expand_features(xc, &err);
2513 error_propagate(errp, err);
2514 object_unref(OBJECT(xc));
2520 CpuModelExpansionInfo *
2521 arch_query_cpu_model_expansion(CpuModelExpansionType type,
2522 CpuModelInfo *model,
2527 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
2528 QDict *props = NULL;
2529 const char *base_name;
2531 xc = x86_cpu_from_model(model->name,
2533 qobject_to_qdict(model->props) :
2539 props = qdict_new();
2542 case CPU_MODEL_EXPANSION_TYPE_STATIC:
2543 /* Static expansion will be based on "base" only */
2545 x86_cpu_to_dict(xc, props);
2547 case CPU_MODEL_EXPANSION_TYPE_FULL:
2548 /* As we don't return every single property, full expansion needs
2549 * to keep the original model name+props, and add extra
2550 * properties on top of that.
2552 base_name = model->name;
2553 x86_cpu_to_dict_full(xc, props);
2556 error_setg(&err, "Unsupported expansion type");
2561 props = qdict_new();
2563 x86_cpu_to_dict(xc, props);
2565 ret->model = g_new0(CpuModelInfo, 1);
2566 ret->model->name = g_strdup(base_name);
2567 ret->model->props = QOBJECT(props);
2568 ret->model->has_props = true;
2571 object_unref(OBJECT(xc));
2573 error_propagate(errp, err);
2574 qapi_free_CpuModelExpansionInfo(ret);
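/* Rough shape of the QMP command served by this function (property list
 * abbreviated; the values are only examples):
 *
 * -> { "execute": "query-cpu-model-expansion",
 *      "arguments": { "type": "static",
 *                     "model": { "name": "Haswell",
 *                                "props": { "pcid": false } } } }
 * <- { "return": { "model": { "name": "base",
 *                             "props": { "pcid": false, "avx2": true,
 *                                        ... } } } }
 */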
2580 static gchar *x86_gdb_arch_name(CPUState *cs)
2582 #ifdef TARGET_X86_64
2583 return g_strdup("i386:x86-64");
2585 return g_strdup("i386");
2589 X86CPU *cpu_x86_init(const char *cpu_model)
2591 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
2594 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2596 X86CPUDefinition *cpudef = data;
2597 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2599 xcc->cpu_def = cpudef;
2600 xcc->migration_safe = true;
2603 static void x86_register_cpudef_type(X86CPUDefinition *def)
2605 char *typename = x86_cpu_type_name(def->name);
2608 .parent = TYPE_X86_CPU,
2609 .class_init = x86_cpu_cpudef_class_init,
2613 /* AMD aliases are handled at runtime based on CPUID vendor, so
2614 * they shouldn't be set on the CPU model table.
2616 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
2622 #if !defined(CONFIG_USER_ONLY)
2624 void cpu_clear_apic_feature(CPUX86State *env)
2626 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2629 #endif /* !CONFIG_USER_ONLY */
2631 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2632 uint32_t *eax, uint32_t *ebx,
2633 uint32_t *ecx, uint32_t *edx)
2635 X86CPU *cpu = x86_env_get_cpu(env);
2636 CPUState *cs = CPU(cpu);
2637 uint32_t pkg_offset;
2640 /* Calculate & apply limits for different index ranges */
2641 if (index >= 0xC0000000) {
2642 limit = env->cpuid_xlevel2;
2643 } else if (index >= 0x80000000) {
2644 limit = env->cpuid_xlevel;
2646 limit = env->cpuid_level;
2649 if (index > limit) {
2650 /* Intel documentation states that invalid EAX input will
2651 * return the same information as EAX=cpuid_level
2652 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2654 index = env->cpuid_level;
2659 *eax = env->cpuid_level;
2660 *ebx = env->cpuid_vendor1;
2661 *edx = env->cpuid_vendor2;
2662 *ecx = env->cpuid_vendor3;
2665 *eax = env->cpuid_version;
2666 *ebx = (cpu->apic_id << 24) |
2667 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2668 *ecx = env->features[FEAT_1_ECX];
2669 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2670 *ecx |= CPUID_EXT_OSXSAVE;
2672 *edx = env->features[FEAT_1_EDX];
2673 if (cs->nr_cores * cs->nr_threads > 1) {
2674 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2679 /* cache info: needed for Pentium Pro compatibility */
2680 if (cpu->cache_info_passthrough) {
2681 host_cpuid(index, 0, eax, ebx, ecx, edx);
2684 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2686 if (!cpu->enable_l3_cache) {
2689 *ecx = L3_N_DESCRIPTOR;
2691 *edx = (L1D_DESCRIPTOR << 16) | \
2692 (L1I_DESCRIPTOR << 8) | \
2696 /* cache info: needed for Core compatibility */
2697 if (cpu->cache_info_passthrough) {
2698 host_cpuid(index, count, eax, ebx, ecx, edx);
2699 *eax &= ~0xFC000000;
2703 case 0: /* L1 dcache info */
2704 *eax |= CPUID_4_TYPE_DCACHE | \
2705 CPUID_4_LEVEL(1) | \
2706 CPUID_4_SELF_INIT_LEVEL;
2707 *ebx = (L1D_LINE_SIZE - 1) | \
2708 ((L1D_PARTITIONS - 1) << 12) | \
2709 ((L1D_ASSOCIATIVITY - 1) << 22);
2710 *ecx = L1D_SETS - 1;
2711 *edx = CPUID_4_NO_INVD_SHARING;
2713 case 1: /* L1 icache info */
2714 *eax |= CPUID_4_TYPE_ICACHE | \
2715 CPUID_4_LEVEL(1) | \
2716 CPUID_4_SELF_INIT_LEVEL;
2717 *ebx = (L1I_LINE_SIZE - 1) | \
2718 ((L1I_PARTITIONS - 1) << 12) | \
2719 ((L1I_ASSOCIATIVITY - 1) << 22);
2720 *ecx = L1I_SETS - 1;
2721 *edx = CPUID_4_NO_INVD_SHARING;
2723 case 2: /* L2 cache info */
2724 *eax |= CPUID_4_TYPE_UNIFIED | \
2725 CPUID_4_LEVEL(2) | \
2726 CPUID_4_SELF_INIT_LEVEL;
2727 if (cs->nr_threads > 1) {
2728 *eax |= (cs->nr_threads - 1) << 14;
2730 *ebx = (L2_LINE_SIZE - 1) | \
2731 ((L2_PARTITIONS - 1) << 12) | \
2732 ((L2_ASSOCIATIVITY - 1) << 22);
2734 *edx = CPUID_4_NO_INVD_SHARING;
2736 case 3: /* L3 cache info */
2737 if (!cpu->enable_l3_cache) {
2744 *eax |= CPUID_4_TYPE_UNIFIED | \
2745 CPUID_4_LEVEL(3) | \
2746 CPUID_4_SELF_INIT_LEVEL;
2747 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2748 *eax |= ((1 << pkg_offset) - 1) << 14;
2749 *ebx = (L3_N_LINE_SIZE - 1) | \
2750 ((L3_N_PARTITIONS - 1) << 12) | \
2751 ((L3_N_ASSOCIATIVITY - 1) << 22);
2752 *ecx = L3_N_SETS - 1;
2753 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
2755 default: /* end of info */
2764 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2765 if ((*eax & 31) && cs->nr_cores > 1) {
2766 *eax |= (cs->nr_cores - 1) << 26;
2770 /* mwait info: needed for Core compatibility */
2771 *eax = 0; /* Smallest monitor-line size in bytes */
2772 *ebx = 0; /* Largest monitor-line size in bytes */
2773 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2777 /* Thermal and Power Leaf */
2778 *eax = env->features[FEAT_6_EAX];
2784 /* Structured Extended Feature Flags Enumeration Leaf */
2786 *eax = 0; /* Maximum ECX value for sub-leaves */
2787 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2788 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2789 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2790 *ecx |= CPUID_7_0_ECX_OSPKE;
2792 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
2801 /* Direct Cache Access Information Leaf */
2802 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2808 /* Architectural Performance Monitoring Leaf */
2809 if (kvm_enabled() && cpu->enable_pmu) {
2810 KVMState *s = cs->kvm_state;
2812 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2813 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2814 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2815 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2824 /* Extended Topology Enumeration Leaf */
2825 if (!cpu->enable_cpuid_0xb) {
2826 *eax = *ebx = *ecx = *edx = 0;
2830 *ecx = count & 0xff;
2831 *edx = cpu->apic_id;
2835 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
2836 *ebx = cs->nr_threads;
2837 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2840 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2841 *ebx = cs->nr_cores * cs->nr_threads;
2842 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2847 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2850 assert(!(*eax & ~0x1f));
2851 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
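/* Rough worked example for this leaf, assuming -smp 4,sockets=1,cores=2,
 * threads=2 (apicid_core_offset() == 1, apicid_pkg_offset() == 2):
 *
 *   ECX=0 (SMT level):  EAX=1, EBX=2, ECX=0x0100, EDX=APIC ID
 *   ECX=1 (core level): EAX=2, EBX=4, ECX=0x0201, EDX=APIC ID
 *
 * ECX[15:8] is the level type (1=SMT, 2=core), ECX[7:0] echoes the input
 * sub-leaf, and EAX[4:0] is the number of APIC ID bits to shift right to
 * reach the next-level ID.
 */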
2854 /* Processor Extended State */
2859 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2864 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
2865 *eax = env->features[FEAT_XSAVE_COMP_LO];
2866 *edx = env->features[FEAT_XSAVE_COMP_HI];
2868 } else if (count == 1) {
2869 *eax = env->features[FEAT_XSAVE];
2870 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2871 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
2872 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2880 *eax = env->cpuid_xlevel;
2881 *ebx = env->cpuid_vendor1;
2882 *edx = env->cpuid_vendor2;
2883 *ecx = env->cpuid_vendor3;
2886 *eax = env->cpuid_version;
2888 *ecx = env->features[FEAT_8000_0001_ECX];
2889 *edx = env->features[FEAT_8000_0001_EDX];
2891 /* The Linux kernel checks for the CMPLegacy bit and
2892 * discards multiple thread information if it is set.
2893 * So don't set it here for Intel to make Linux guests happy.
2895 if (cs->nr_cores * cs->nr_threads > 1) {
2896 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2897 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2898 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2899 *ecx |= 1 << 1; /* CmpLegacy bit */
2906 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2907 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2908 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2909 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2912 /* cache info (L1 cache) */
2913 if (cpu->cache_info_passthrough) {
2914 host_cpuid(index, 0, eax, ebx, ecx, edx);
2917 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2918 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2919 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2920 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2921 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2922 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2923 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2924 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2927 /* cache info (L2 cache) */
2928 if (cpu->cache_info_passthrough) {
2929 host_cpuid(index, 0, eax, ebx, ecx, edx);
2932 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2933 (L2_DTLB_2M_ENTRIES << 16) | \
2934 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2935 (L2_ITLB_2M_ENTRIES);
2936 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2937 (L2_DTLB_4K_ENTRIES << 16) | \
2938 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2939 (L2_ITLB_4K_ENTRIES);
2940 *ecx = (L2_SIZE_KB_AMD << 16) | \
2941 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2942 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2943 if (!cpu->enable_l3_cache) {
2944 *edx = ((L3_SIZE_KB / 512) << 18) | \
2945 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2946 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2948 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
2949 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
2950 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
2957 *edx = env->features[FEAT_8000_0007_EDX];
2960 /* virtual & phys address size in low 2 bytes. */
2961 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2962 /* 64 bit processor */
2963 *eax = cpu->phys_bits; /* configurable physical bits */
2964 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
2965 *eax |= 0x00003900; /* 57 bits virtual */
2967 *eax |= 0x00003000; /* 48 bits virtual */
2970 *eax = cpu->phys_bits;
2975 if (cs->nr_cores * cs->nr_threads > 1) {
2976 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
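/* Example of the resulting encoding: a long-mode capable vCPU with
 * phys-bits=40 and no la57 reports EAX = 0x00003028 here, i.e. 40 physical
 * bits in EAX[7:0] and 48 virtual bits in EAX[15:8].
 */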
2980 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2981 *eax = 0x00000001; /* SVM Revision */
2982 *ebx = 0x00000010; /* nr of ASIDs */
2984 *edx = env->features[FEAT_SVM]; /* optional features */
2993 *eax = env->cpuid_xlevel2;
2999 /* Support for VIA CPU's CPUID instruction */
3000 *eax = env->cpuid_version;
3003 *edx = env->features[FEAT_C000_0001_EDX];
3008 /* Reserved for the future, and now filled with zero */
3015 /* reserved values: zero */
3024 /* CPUClass::reset() */
3025 static void x86_cpu_reset(CPUState *s)
3027 X86CPU *cpu = X86_CPU(s);
3028 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
3029 CPUX86State *env = &cpu->env;
3034 xcc->parent_reset(s);
3036 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
3038 env->old_exception = -1;
3040 /* init to reset state */
3042 env->hflags2 |= HF2_GIF_MASK;
3044 cpu_x86_update_cr0(env, 0x60000010);
3045 env->a20_mask = ~0x0;
3046 env->smbase = 0x30000;
3048 env->idt.limit = 0xffff;
3049 env->gdt.limit = 0xffff;
3050 env->ldt.limit = 0xffff;
3051 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
3052 env->tr.limit = 0xffff;
3053 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
3055 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
3056 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
3057 DESC_R_MASK | DESC_A_MASK);
3058 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
3059 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3061 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
3062 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3064 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
3065 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3067 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
3068 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3070 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
3071 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3075 env->regs[R_EDX] = env->cpuid_version;
3080 for (i = 0; i < 8; i++) {
3083 cpu_set_fpuc(env, 0x37f);
3085 env->mxcsr = 0x1f80;
3086 /* All units are in INIT state. */
3089 env->pat = 0x0007040600070406ULL;
3090 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
3092 memset(env->dr, 0, sizeof(env->dr));
3093 env->dr[6] = DR6_FIXED_1;
3094 env->dr[7] = DR7_FIXED_1;
3095 cpu_breakpoint_remove_all(s, BP_CPU);
3096 cpu_watchpoint_remove_all(s, BP_CPU);
3099 xcr0 = XSTATE_FP_MASK;
3101 #ifdef CONFIG_USER_ONLY
3102 /* Enable all the features for user-mode. */
3103 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
3104 xcr0 |= XSTATE_SSE_MASK;
3106 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3107 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3108 if (env->features[esa->feature] & esa->bits) {
3113 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
3114 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
3116 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
3117 cr4 |= CR4_FSGSBASE_MASK;
3122 cpu_x86_update_cr4(env, cr4);
3125 * SDM 11.11.5 requires:
3126 * - IA32_MTRR_DEF_TYPE MSR.E = 0
3127 * - IA32_MTRR_PHYSMASKn.V = 0
3128 * All other bits are undefined. For simplification, zero it all.
3130 env->mtrr_deftype = 0;
3131 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
3132 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
3134 #if !defined(CONFIG_USER_ONLY)
3135 /* We hard-wire the BSP to the first CPU. */
3136 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
3138 s->halted = !cpu_is_bsp(cpu);
3140 if (kvm_enabled()) {
3141 kvm_arch_reset_vcpu(cpu);
3146 #ifndef CONFIG_USER_ONLY
3147 bool cpu_is_bsp(X86CPU *cpu)
3149 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
3152 /* TODO: remove me, when reset over QOM tree is implemented */
3153 static void x86_cpu_machine_reset_cb(void *opaque)
3155 X86CPU *cpu = opaque;
3156 cpu_reset(CPU(cpu));
3160 static void mce_init(X86CPU *cpu)
3162 CPUX86State *cenv = &cpu->env;
3165 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3166 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3167 (CPUID_MCE | CPUID_MCA)) {
3168 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3169 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3170 cenv->mcg_ctl = ~(uint64_t)0;
3171 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3172 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
3177 #ifndef CONFIG_USER_ONLY
3178 APICCommonClass *apic_get_class(void)
3180 const char *apic_type = "apic";
3182 if (kvm_apic_in_kernel()) {
3183 apic_type = "kvm-apic";
3184 } else if (xen_enabled()) {
3185 apic_type = "xen-apic";
3188 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3191 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
3193 APICCommonState *apic;
3194 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
3196 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
3198 object_property_add_child(OBJECT(cpu), "lapic",
3199 OBJECT(cpu->apic_state), &error_abort);
3200 object_unref(OBJECT(cpu->apic_state));
3202 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
3203 /* TODO: convert to link<> */
3204 apic = APIC_COMMON(cpu->apic_state);
3206 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
3209 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3211 APICCommonState *apic;
3212 static bool apic_mmio_map_once;
3214 if (cpu->apic_state == NULL) {
3217 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
3220 /* Map APIC MMIO area */
3221 apic = APIC_COMMON(cpu->apic_state);
3222 if (!apic_mmio_map_once) {
3223 memory_region_add_subregion_overlap(get_system_memory(),
3225 MSR_IA32_APICBASE_BASE,
3228 apic_mmio_map_once = true;
3232 static void x86_cpu_machine_done(Notifier *n, void *unused)
3234 X86CPU *cpu = container_of(n, X86CPU, machine_done);
3235 MemoryRegion *smram =
3236 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
3239 cpu->smram = g_new(MemoryRegion, 1);
3240 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
3241 smram, 0, 1ull << 32);
3242 memory_region_set_enabled(cpu->smram, true);
3243 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
3247 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3252 /* Note: Only safe for use on x86(-64) hosts */
3253 static uint32_t x86_host_phys_bits(void)
3256 uint32_t host_phys_bits;
3258 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3259 if (eax >= 0x80000008) {
3260 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3261 /* Note: According to AMD doc 25481 rev 2.34 they have a field
3262 * at 23:16 that can specify the maximum physical address bits for
3263 * the guest that can override this value; but I've not seen
3264 * anything with that set.
3266 host_phys_bits = eax & 0xff;
3268 /* It's an odd 64 bit machine that doesn't have the leaf for
3269 * physical address bits; fall back to 36 that's most older
3272 host_phys_bits = 36;
3275 return host_phys_bits;
3278 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3285 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3286 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3288 CPUX86State *env = &cpu->env;
3289 FeatureWordInfo *fi = &feature_word_info[w];
3290 uint32_t eax = fi->cpuid_eax;
3291 uint32_t region = eax & 0xF0000000;
3293 if (!env->features[w]) {
3299 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3302 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3305 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
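/* For example: setting any bit in FEAT_7_0_EBX (cpuid_eax == 7) raises
 * cpuid_min_level to at least 7, while a bit in FEAT_8000_0007_EDX
 * (cpuid_eax == 0x80000007) raises cpuid_min_xlevel to at least 0x80000007.
 */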
3310 /* Calculate XSAVE components based on the configured CPU feature flags */
3311 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3313 CPUX86State *env = &cpu->env;
3317 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3322 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3323 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3324 if (env->features[esa->feature] & esa->bits) {
3325 mask |= (1ULL << i);
3329 env->features[FEAT_XSAVE_COMP_LO] = mask;
3330 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
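/* Example (based on the current x86_ext_save_areas table): enabling "avx"
 * sets CPUID_EXT_AVX in FEAT_1_ECX, which marks XSAVE component 2 (YMM
 * state) in the mask above and therefore in CPUID[EAX=0xD,ECX=0].EAX.
 */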
3333 /***** Steps involved in loading and filtering CPUID data
3335 * When initializing and realizing a CPU object, the steps
3336 * involved in setting up CPUID data are:
3338 * 1) Loading CPU model definition (X86CPUDefinition). This is
3339 * implemented by x86_cpu_load_def() and should be completely
3340 * transparent, as it is done automatically by instance_init.
3341 * No code should need to look at X86CPUDefinition structs
3342 * outside instance_init.
3344 * 2) CPU expansion. This is done by realize before CPUID
3345 * filtering, and will make sure host/accelerator data is
3346 * loaded for CPU models that depend on host capabilities
3347 * (e.g. "host"). Done by x86_cpu_expand_features().
3349 * 3) CPUID filtering. This initializes extra data related to
3350 * CPUID, and checks if the host supports all capabilities
3351 * required by the CPU. Runnability of a CPU model is
3352 * determined at this step. Done by x86_cpu_filter_features().
3354 * Some operations don't require all steps to be performed.
3357 * - CPU instance creation (instance_init) will run only CPU
3358 * model loading. CPU expansion can't run at instance_init-time
3359 * because host/accelerator data may not be available yet.
3360 * - CPU realization will perform both CPU model expansion and CPUID
3361 * filtering, and return an error in case one of them fails.
3362 * - query-cpu-definitions needs to run all 3 steps. It needs
3363 * to run CPUID filtering, as the 'unavailable-features'
3364 * field is set based on the filtering results.
3365 * - The query-cpu-model-expansion QMP command only needs to run
3366 * CPU model loading and CPU expansion. It should not filter
3367 * any CPUID data based on host capabilities.
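/* As a rough illustration of the call path (other callers, such as
 * query-cpu-definitions, use the same helpers):
 *
 *   x86_cpu_initfn()    -> x86_cpu_load_def(cpu, xcc->cpu_def, ...)   step 1
 *   x86_cpu_realizefn() -> x86_cpu_expand_features(cpu, ...)          step 2
 *                       -> x86_cpu_filter_features(cpu)               step 3
 */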
3370 /* Expand CPU configuration data, based on configured features
3371 * and host/accelerator capabilities when appropriate.
3373 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
3375 CPUX86State *env = &cpu->env;
3378 Error *local_err = NULL;
3380 /*TODO: Now cpu->max_features doesn't overwrite features
3381 * set using QOM properties, and we can convert
3382 * plus_features & minus_features to global properties
3383 * inside x86_cpu_parse_featurestr() too.
3385 if (cpu->max_features) {
3386 for (w = 0; w < FEATURE_WORDS; w++) {
3387 /* Override only features that weren't set explicitly
3391 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
3392 ~env->user_features[w];
3396 for (l = plus_features; l; l = l->next) {
3397 const char *prop = l->data;
3398 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
3404 for (l = minus_features; l; l = l->next) {
3405 const char *prop = l->data;
3406 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
3412 if (!kvm_enabled() || !cpu->expose_kvm) {
3413 env->features[FEAT_KVM] = 0;
3416 x86_cpu_enable_xsave_components(cpu);
3418 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
3419 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
3420 if (cpu->full_cpuid_auto_level) {
3421 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
3422 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
3423 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
3424 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
3425 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
3426 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
3427 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
3428 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
3429 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
3430 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
3431 /* SVM requires CPUID[0x8000000A] */
3432 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3433 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
3437 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
3438 if (env->cpuid_level == UINT32_MAX) {
3439 env->cpuid_level = env->cpuid_min_level;
3441 if (env->cpuid_xlevel == UINT32_MAX) {
3442 env->cpuid_xlevel = env->cpuid_min_xlevel;
3444 if (env->cpuid_xlevel2 == UINT32_MAX) {
3445 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
3449 if (local_err != NULL) {
3450 error_propagate(errp, local_err);
3455 * Finishes initialization of CPUID data, filters CPU feature
3456 * words based on host availability of each feature.
3458 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
3460 static int x86_cpu_filter_features(X86CPU *cpu)
3462 CPUX86State *env = &cpu->env;
3466 for (w = 0; w < FEATURE_WORDS; w++) {
3467 uint32_t host_feat =
3468 x86_cpu_get_supported_feature_word(w, false);
3469 uint32_t requested_features = env->features[w];
3470 env->features[w] &= host_feat;
3471 cpu->filtered_features[w] = requested_features & ~env->features[w];
3472 if (cpu->filtered_features[w]) {
3480 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
3481 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
3482 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
3483 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
3484 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
3485 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
3486 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
3488 CPUState *cs = CPU(dev);
3489 X86CPU *cpu = X86_CPU(dev);
3490 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3491 CPUX86State *env = &cpu->env;
3492 Error *local_err = NULL;
3493 static bool ht_warned;
3495 if (xcc->kvm_required && !kvm_enabled()) {
3496 char *name = x86_cpu_class_get_model_name(xcc);
3497 error_setg(&local_err, "CPU model '%s' requires KVM", name);
3502 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
3503 error_setg(errp, "apic-id property was not initialized properly");
3507 x86_cpu_expand_features(cpu, &local_err);
3512 if (x86_cpu_filter_features(cpu) &&
3513 (cpu->check_cpuid || cpu->enforce_cpuid)) {
3514 x86_cpu_report_filtered_features(cpu);
3515 if (cpu->enforce_cpuid) {
3516 error_setg(&local_err,
3518 "Host doesn't support requested features" :
3519 "TCG doesn't support requested features");
3524 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
3527 if (IS_AMD_CPU(env)) {
3528 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
3529 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
3530 & CPUID_EXT2_AMD_ALIASES);
3533 /* For 64bit systems think about the number of physical bits to present.
3534 * Ideally this should be the same as the host; anything other than matching
3535 * the host can cause incorrect guest behaviour.
3536 * QEMU used to pick the magic value of 40 bits that corresponds to
3537 * consumer AMD devices but nothing else.
3539 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3540 if (kvm_enabled()) {
3541 uint32_t host_phys_bits = x86_host_phys_bits();
3544 if (cpu->host_phys_bits) {
3545 /* The user asked for us to use the host physical bits */
3546 cpu->phys_bits = host_phys_bits;
3549 /* Print a warning if the user set it to a value that's not the
3552 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
3554 error_report("Warning: Host physical bits (%u)"
3555 " does not match phys-bits property (%u)",
3556 host_phys_bits, cpu->phys_bits);
3560 if (cpu->phys_bits &&
3561 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
3562 cpu->phys_bits < 32)) {
3563 error_setg(errp, "phys-bits should be between 32 and %u "
3565 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
3569 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
3570 error_setg(errp, "TCG only supports phys-bits=%u",
3571 TCG_PHYS_ADDR_BITS);
3575 /* 0 means it was not explicitly set by the user (or by machine
3576 * compat_props or by the host code above). In this case, the default
3577 * is the value used by TCG (40).
3579 if (cpu->phys_bits == 0) {
3580 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
3583 /* For 32 bit systems don't use the user set value, but keep
3584 * phys_bits consistent with what we tell the guest.
3586 if (cpu->phys_bits != 0) {
3587 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
3591 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
3592 cpu->phys_bits = 36;
3594 cpu->phys_bits = 32;
3597 cpu_exec_realizefn(cs, &local_err);
3598 if (local_err != NULL) {
3599 error_propagate(errp, local_err);
3603 if (tcg_enabled()) {
3607 #ifndef CONFIG_USER_ONLY
3608 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
3610 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
3611 x86_cpu_apic_create(cpu, &local_err);
3612 if (local_err != NULL) {
3620 #ifndef CONFIG_USER_ONLY
3621 if (tcg_enabled()) {
3622 AddressSpace *as_normal = address_space_init_shareable(cs->memory,
3624 AddressSpace *as_smm = g_new(AddressSpace, 1);
3626 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
3627 cpu->cpu_as_root = g_new(MemoryRegion, 1);
3629 /* Outer container... */
3630 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
3631 memory_region_set_enabled(cpu->cpu_as_root, true);
3633 /* ... with two regions inside: normal system memory with low
3636 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
3637 get_system_memory(), 0, ~0ull);
3638 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
3639 memory_region_set_enabled(cpu->cpu_as_mem, true);
3640 address_space_init(as_smm, cpu->cpu_as_root, "CPU");
3643 cpu_address_space_init(cs, as_normal, 0);
3644 cpu_address_space_init(cs, as_smm, 1);
3646 /* ... SMRAM with higher priority, linked from /machine/smram. */
3647 cpu->machine_done.notify = x86_cpu_machine_done;
3648 qemu_add_machine_init_done_notifier(&cpu->machine_done);
3654 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3655 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3656 * based on inputs (sockets,cores,threads), it is still better to give
3659 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
3660 * cs->nr_threads hasn't been populated yet and the check is incorrect.
3662 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3663 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3664 " -smp options properly.");
3668 x86_cpu_apic_realize(cpu, &local_err);
3669 if (local_err != NULL) {
3674 xcc->parent_realize(dev, &local_err);
3677 if (local_err != NULL) {
3678 error_propagate(errp, local_err);
3683 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3685 X86CPU *cpu = X86_CPU(dev);
3686 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3687 Error *local_err = NULL;
3689 #ifndef CONFIG_USER_ONLY
3690 cpu_remove_sync(CPU(dev));
3691 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3694 if (cpu->apic_state) {
3695 object_unparent(OBJECT(cpu->apic_state));
3696 cpu->apic_state = NULL;
3699 xcc->parent_unrealize(dev, &local_err);
3700 if (local_err != NULL) {
3701 error_propagate(errp, local_err);
3706 typedef struct BitProperty {
3711 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3712 void *opaque, Error **errp)
3714 X86CPU *cpu = X86_CPU(obj);
3715 BitProperty *fp = opaque;
3716 uint32_t f = cpu->env.features[fp->w];
3717 bool value = (f & fp->mask) == fp->mask;
3718 visit_type_bool(v, name, &value, errp);
3721 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3722 void *opaque, Error **errp)
3724 DeviceState *dev = DEVICE(obj);
3725 X86CPU *cpu = X86_CPU(obj);
3726 BitProperty *fp = opaque;
3727 Error *local_err = NULL;
3730 if (dev->realized) {
3731 qdev_prop_set_after_realize(dev, name, errp);
3735 visit_type_bool(v, name, &value, &local_err);
3737 error_propagate(errp, local_err);
3742 cpu->env.features[fp->w] |= fp->mask;
3744 cpu->env.features[fp->w] &= ~fp->mask;
3746 cpu->env.user_features[fp->w] |= fp->mask;
3749 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3752 BitProperty *prop = opaque;
3756 /* Register a boolean property to get/set a single bit in a uint32_t field.
3758 * The same property name can be registered multiple times to make it affect
3759 * multiple bits in the same FeatureWord. In that case, the getter will return
3760 * true only if all bits are set.
3762 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3763 const char *prop_name,
3769 uint32_t mask = (1UL << bitnr);
3771 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3777 fp = g_new0(BitProperty, 1);
3780 object_property_add(OBJECT(cpu), prop_name, "bool",
3781 x86_cpu_get_bit_prop,
3782 x86_cpu_set_bit_prop,
3783 x86_cpu_release_bit_prop, fp, &error_abort);
3787 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3791 FeatureWordInfo *fi = &feature_word_info[w];
3792 const char *name = fi->feat_names[bitnr];
3798 /* Property names should use "-" instead of "_".
3799 * Old names containing underscores are registered as aliases
3800 * using object_property_add_alias()
3802 assert(!strchr(name, '_'));
3803 /* aliases don't use "|" delimiters anymore, they are registered
3804 * manually using object_property_add_alias() */
3805 assert(!strchr(name, '|'));
3806 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
3809 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
3811 X86CPU *cpu = X86_CPU(cs);
3812 CPUX86State *env = &cpu->env;
3813 GuestPanicInformation *panic_info = NULL;
3815 if (env->features[FEAT_HYPERV_EDX] & HV_X64_GUEST_CRASH_MSR_AVAILABLE) {
3816 panic_info = g_malloc0(sizeof(GuestPanicInformation));
3818 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
3820 assert(HV_X64_MSR_CRASH_PARAMS >= 5);
3821 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
3822 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
3823 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
3824 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
3825 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
3830 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
3831 const char *name, void *opaque,
3834 CPUState *cs = CPU(obj);
3835 GuestPanicInformation *panic_info;
3837 if (!cs->crash_occurred) {
3838 error_setg(errp, "No crash occurred");
3842 panic_info = x86_cpu_get_crash_info(cs);
3843 if (panic_info == NULL) {
3844 error_setg(errp, "No crash information");
3848 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
3850 qapi_free_GuestPanicInformation(panic_info);
3853 static void x86_cpu_initfn(Object *obj)
3855 CPUState *cs = CPU(obj);
3856 X86CPU *cpu = X86_CPU(obj);
3857 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3858 CPUX86State *env = &cpu->env;
3863 object_property_add(obj, "family", "int",
3864 x86_cpuid_version_get_family,
3865 x86_cpuid_version_set_family, NULL, NULL, NULL);
3866 object_property_add(obj, "model", "int",
3867 x86_cpuid_version_get_model,
3868 x86_cpuid_version_set_model, NULL, NULL, NULL);
3869 object_property_add(obj, "stepping", "int",
3870 x86_cpuid_version_get_stepping,
3871 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3872 object_property_add_str(obj, "vendor",
3873 x86_cpuid_get_vendor,
3874 x86_cpuid_set_vendor, NULL);
3875 object_property_add_str(obj, "model-id",
3876 x86_cpuid_get_model_id,
3877 x86_cpuid_set_model_id, NULL);
3878 object_property_add(obj, "tsc-frequency", "int",
3879 x86_cpuid_get_tsc_freq,
3880 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3881 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3882 x86_cpu_get_feature_words,
3883 NULL, NULL, (void *)env->features, NULL);
3884 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3885 x86_cpu_get_feature_words,
3886 NULL, NULL, (void *)cpu->filtered_features, NULL);
3888 object_property_add(obj, "crash-information", "GuestPanicInformation",
3889 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
3891 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3893 for (w = 0; w < FEATURE_WORDS; w++) {
3896 for (bitnr = 0; bitnr < 32; bitnr++) {
3897 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3901 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
3902 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
3903 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
3904 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
3905 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
3906 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
3907 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
3909 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
3910 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
3911 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
3912 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
3913 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
3914 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
3915 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
3916 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
3917 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
3918 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
3919 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
3920 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
3921 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
3922 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
3923 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
3924 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
3925 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
3926 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
3927 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
3928 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
3929 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
3932 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3936 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3938 X86CPU *cpu = X86_CPU(cs);
3940 return cpu->apic_id;
3943 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3945 X86CPU *cpu = X86_CPU(cs);
3947 return cpu->env.cr[0] & CR0_PG_MASK;
3950 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3952 X86CPU *cpu = X86_CPU(cs);
3954 cpu->env.eip = value;
3957 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3959 X86CPU *cpu = X86_CPU(cs);
3961 cpu->env.eip = tb->pc - tb->cs_base;
3964 static bool x86_cpu_has_work(CPUState *cs)
3966 X86CPU *cpu = X86_CPU(cs);
3967 CPUX86State *env = &cpu->env;
3969 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3970 CPU_INTERRUPT_POLL)) &&
3971 (env->eflags & IF_MASK)) ||
3972 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3973 CPU_INTERRUPT_INIT |
3974 CPU_INTERRUPT_SIPI |
3975 CPU_INTERRUPT_MCE)) ||
3976 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3977 !(env->hflags & HF_SMM_MASK));
3980 static Property x86_cpu_properties[] = {
3981 #ifdef CONFIG_USER_ONLY
3982 /* apic_id = 0 by default for *-user, see commit 9886e834 */
3983 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
3984 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
3985 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
3986 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
3988 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
3989 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
3990 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
3991 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
3993 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
3994 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3995 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3996 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3997 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3998 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3999 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
4000 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
4001 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
4002 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
4003 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
4004 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
4005 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
4006 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
4007 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
4008 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
4009 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
4010 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
4011 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
4012 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
4013 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
4014 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
4015 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
4016 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
4017 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
4018 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
4019 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
4020 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
4021 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
4022 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
4024 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
4025 DEFINE_PROP_END_OF_LIST()
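/* All of the properties above are settable from the command line; one
 * hypothetical combination, assuming the model name exists in the build:
 *
 *   -cpu Haswell,phys-bits=40,l3-cache=off,pmu=on,enforce=on
 */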
4028 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
4030 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4031 CPUClass *cc = CPU_CLASS(oc);
4032 DeviceClass *dc = DEVICE_CLASS(oc);
4034 xcc->parent_realize = dc->realize;
4035 xcc->parent_unrealize = dc->unrealize;
4036 dc->realize = x86_cpu_realizefn;
4037 dc->unrealize = x86_cpu_unrealizefn;
4038 dc->props = x86_cpu_properties;
4040 xcc->parent_reset = cc->reset;
4041 cc->reset = x86_cpu_reset;
4042 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
4044 cc->class_by_name = x86_cpu_class_by_name;
4045 cc->parse_features = x86_cpu_parse_featurestr;
4046 cc->has_work = x86_cpu_has_work;
4047 cc->do_interrupt = x86_cpu_do_interrupt;
4048 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
4049 cc->dump_state = x86_cpu_dump_state;
4050 cc->get_crash_info = x86_cpu_get_crash_info;
4051 cc->set_pc = x86_cpu_set_pc;
4052 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
4053 cc->gdb_read_register = x86_cpu_gdb_read_register;
4054 cc->gdb_write_register = x86_cpu_gdb_write_register;
4055 cc->get_arch_id = x86_cpu_get_arch_id;
4056 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
4057 #ifdef CONFIG_USER_ONLY
4058 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
4060 cc->asidx_from_attrs = x86_asidx_from_attrs;
4061 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
4062 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
4063 cc->write_elf64_note = x86_cpu_write_elf64_note;
4064 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
4065 cc->write_elf32_note = x86_cpu_write_elf32_note;
4066 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
4067 cc->vmsd = &vmstate_x86_cpu;
4069 cc->gdb_arch_name = x86_gdb_arch_name;
4070 #ifdef TARGET_X86_64
4071 cc->gdb_core_xml_file = "i386-64bit.xml";
4072 cc->gdb_num_core_regs = 57;
4074 cc->gdb_core_xml_file = "i386-32bit.xml";
4075 cc->gdb_num_core_regs = 41;
4077 #ifndef CONFIG_USER_ONLY
4078 cc->debug_excp_handler = breakpoint_handler;
4080 cc->cpu_exec_enter = x86_cpu_exec_enter;
4081 cc->cpu_exec_exit = x86_cpu_exec_exit;
4083 dc->user_creatable = true;
4086 static const TypeInfo x86_cpu_type_info = {
4087 .name = TYPE_X86_CPU,
4089 .instance_size = sizeof(X86CPU),
4090 .instance_init = x86_cpu_initfn,
4092 .class_size = sizeof(X86CPUClass),
4093 .class_init = x86_cpu_common_class_init,
4097 /* "base" CPU model, used by query-cpu-model-expansion */
4098 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
4100 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4102 xcc->static_model = true;
4103 xcc->migration_safe = true;
4104 xcc->model_description = "base CPU model type with no features enabled";
4108 static const TypeInfo x86_base_cpu_type_info = {
4109 .name = X86_CPU_TYPE_NAME("base"),
4110 .parent = TYPE_X86_CPU,
4111 .class_init = x86_cpu_base_class_init,
4114 static void x86_cpu_register_types(void)
4118 type_register_static(&x86_cpu_type_info);
4119 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
4120 x86_register_cpudef_type(&builtin_x86_defs[i]);
4122 type_register_static(&max_x86_cpu_type_info);
4123 type_register_static(&x86_base_cpu_type_info);
4125 type_register_static(&host_x86_cpu_type_info);
4129 type_init(x86_cpu_register_types)