/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
24 #include "exec/exec-all.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/hvf.h"
27 #include "sysemu/cpus.h"
31 #include "qemu/error-report.h"
32 #include "qemu/option.h"
33 #include "qemu/config-file.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-visit-misc.h"
36 #include "qapi/qapi-visit-run-state.h"
37 #include "qapi/qmp/qdict.h"
38 #include "qapi/qmp/qerror.h"
39 #include "qapi/visitor.h"
40 #include "qom/qom-qobject.h"
41 #include "sysemu/arch_init.h"
43 #if defined(CONFIG_KVM)
44 #include <linux/kvm_para.h>
47 #include "sysemu/sysemu.h"
48 #include "hw/qdev-properties.h"
49 #include "hw/i386/topology.h"
50 #ifndef CONFIG_USER_ONLY
51 #include "exec/address-spaces.h"
53 #include "hw/xen/xen.h"
54 #include "hw/i386/apic_internal.h"
57 #include "disas/capstone.h"
59 /* Helpers for building CPUID[2] descriptors: */
61 struct CPUID2CacheDescriptorInfo {
70 #define MiB (1024 * 1024)
73 * Known CPUID 2 cache descriptors.
74 * From Intel SDM Volume 2A, CPUID instruction
76 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
77 [0x06] = { .level = 1, .type = ICACHE, .size = 8 * KiB,
78 .associativity = 4, .line_size = 32, },
79 [0x08] = { .level = 1, .type = ICACHE, .size = 16 * KiB,
80 .associativity = 4, .line_size = 32, },
81 [0x09] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
82 .associativity = 4, .line_size = 64, },
83 [0x0A] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
84 .associativity = 2, .line_size = 32, },
85 [0x0C] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
86 .associativity = 4, .line_size = 32, },
87 [0x0D] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
88 .associativity = 4, .line_size = 64, },
89 [0x0E] = { .level = 1, .type = DCACHE, .size = 24 * KiB,
90 .associativity = 6, .line_size = 64, },
91 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
92 .associativity = 2, .line_size = 64, },
93 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
94 .associativity = 8, .line_size = 64, },
95 /* lines per sector is not supported cpuid2_cache_descriptor(),
96 * so descriptors 0x22, 0x23 are not included
98 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
99 .associativity = 16, .line_size = 64, },
100 /* lines per sector is not supported cpuid2_cache_descriptor(),
101 * so descriptors 0x25, 0x20 are not included
103 [0x2C] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
104 .associativity = 8, .line_size = 64, },
105 [0x30] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
106 .associativity = 8, .line_size = 64, },
107 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
108 .associativity = 4, .line_size = 32, },
109 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
112 .associativity = 4, .line_size = 32, },
113 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
114 .associativity = 4, .line_size = 32, },
115 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
116 .associativity = 4, .line_size = 32, },
117 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
118 .associativity = 4, .line_size = 64, },
119 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
120 .associativity = 8, .line_size = 64, },
121 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
122 .associativity = 12, .line_size = 64, },
123 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
124 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
125 .associativity = 12, .line_size = 64, },
126 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
127 .associativity = 16, .line_size = 64, },
128 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
129 .associativity = 12, .line_size = 64, },
130 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
131 .associativity = 16, .line_size = 64, },
132 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
133 .associativity = 24, .line_size = 64, },
134 [0x60] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
135 .associativity = 8, .line_size = 64, },
136 [0x66] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
137 .associativity = 4, .line_size = 64, },
138 [0x67] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x68] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
141 .associativity = 4, .line_size = 64, },
142 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
143 .associativity = 4, .line_size = 64, },
144 /* lines per sector is not supported cpuid2_cache_descriptor(),
145 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
147 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
148 .associativity = 8, .line_size = 64, },
149 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
150 .associativity = 2, .line_size = 64, },
151 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
152 .associativity = 8, .line_size = 64, },
153 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
154 .associativity = 8, .line_size = 32, },
155 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
156 .associativity = 8, .line_size = 32, },
157 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
158 .associativity = 8, .line_size = 32, },
159 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
160 .associativity = 8, .line_size = 32, },
161 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
162 .associativity = 4, .line_size = 64, },
163 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
164 .associativity = 8, .line_size = 64, },
165 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
166 .associativity = 4, .line_size = 64, },
167 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
170 .associativity = 4, .line_size = 64, },
171 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
172 .associativity = 8, .line_size = 64, },
173 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
176 .associativity = 8, .line_size = 64, },
177 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
178 .associativity = 12, .line_size = 64, },
179 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
182 .associativity = 12, .line_size = 64, },
183 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
184 .associativity = 16, .line_size = 64, },
185 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
188 .associativity = 16, .line_size = 64, },
189 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
190 .associativity = 24, .line_size = 64, },
191 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
192 .associativity = 24, .line_size = 64, },
193 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
194 .associativity = 24, .line_size = 64, },
/*
 * Descriptor 0xFF tells guests (per the Intel SDM):
 * "CPUID leaf 2 does not report cache descriptor information,
 * use CPUID leaf 4 to query cache parameters"
 */
#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
204 * Return a CPUID 2 cache descriptor for a given cache.
205 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
207 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
211 assert(cache->size > 0);
212 assert(cache->level > 0);
213 assert(cache->line_size > 0);
214 assert(cache->associativity > 0);
215 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
216 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
217 if (d->level == cache->level && d->type == cache->type &&
218 d->size == cache->size && d->line_size == cache->line_size &&
219 d->associativity == cache->associativity) {
224 return CACHE_DESCRIPTOR_UNAVAILABLE;
/* CPUID Leaf 4 constants: */

/* EAX: cache type field values */
#define CACHE_TYPE_D    1
#define CACHE_TYPE_I    2
#define CACHE_TYPE_UNIFIED   3

/* EAX: cache level goes in bits 7:5; argument parenthesized so that
 * expressions like CACHE_LEVEL(x & 3) expand correctly. */
#define CACHE_LEVEL(l)        ((l) << 5)

#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING   (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DCACHE) ? CACHE_TYPE_D : \
                       ((t) == ICACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)
250 /* Encode cache info for CPUID[4] */
251 static void encode_cache_cpuid4(CPUCacheInfo *cache,
252 int num_apic_ids, int num_cores,
253 uint32_t *eax, uint32_t *ebx,
254 uint32_t *ecx, uint32_t *edx)
256 assert(cache->size == cache->line_size * cache->associativity *
257 cache->partitions * cache->sets);
259 assert(num_apic_ids > 0);
260 *eax = CACHE_TYPE(cache->type) |
261 CACHE_LEVEL(cache->level) |
262 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
263 ((num_cores - 1) << 26) |
264 ((num_apic_ids - 1) << 14);
266 assert(cache->line_size > 0);
267 assert(cache->partitions > 0);
268 assert(cache->associativity > 0);
269 /* We don't implement fully-associative caches */
270 assert(cache->associativity < cache->sets);
271 *ebx = (cache->line_size - 1) |
272 ((cache->partitions - 1) << 12) |
273 ((cache->associativity - 1) << 22);
275 assert(cache->sets > 0);
276 *ecx = cache->sets - 1;
278 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
279 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
280 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
283 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
284 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
286 assert(cache->size % 1024 == 0);
287 assert(cache->lines_per_tag > 0);
288 assert(cache->associativity > 0);
289 assert(cache->line_size > 0);
290 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
291 (cache->lines_per_tag << 8) | (cache->line_size);
/* Associativity value meaning "fully associative" in AMD CPUID leaves */
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
/* Maps a way count to AMD's 4-bit associativity code (0xF == fully
 * associative, 0 == invalid).
 * NOTE(review): the mappings for intermediate way counts (2, 4, 8, ...,
 * 128) are not visible in this excerpt — confirm against the full macro.
 */
#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
    a == ASSOC_FULL ? 0xF : \
    0 /* invalid value */)
/*
 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
 * (AMD L2/L3 cache descriptors).
 *
 * NOTE(review): the body below reads an `l3` variable that is not in the
 * visible parameter list, and the L3 encoding presumably sits inside an
 * `if (l3)` guard — confirm against the full file.
 */
static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
                                       uint32_t *ecx, uint32_t *edx)
    /* ECX: L2 size in KiB at bits 31:16, encoded associativity at 15:12,
     * lines per tag at 11:8, line size at 7:0 */
    assert(l2->size % 1024 == 0);
    assert(l2->associativity > 0);
    assert(l2->lines_per_tag > 0);
    assert(l2->line_size > 0);
    *ecx = ((l2->size / 1024) << 16) |
           (AMD_ENC_ASSOC(l2->associativity) << 12) |
           (l2->lines_per_tag << 8) | (l2->line_size);

    /* EDX: L3 size is reported in 512 KiB units at bits 31:18 */
    assert(l3->size % (512 * 1024) == 0);
    assert(l3->associativity > 0);
    assert(l3->lines_per_tag > 0);
    assert(l3->line_size > 0);
    *edx = ((l3->size / (512 * 1024)) << 18) |
           (AMD_ENC_ASSOC(l3->associativity) << 12) |
           (l3->lines_per_tag << 8) | (l3->line_size);
/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* NOTE(review): most field initializers (level, size, associativity, ...)
 * of the CPUCacheInfo entries below are not visible in this excerpt —
 * confirm the geometry values against the full file. */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .no_invd_sharing = true,

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .no_invd_sharing = true,

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .no_invd_sharing = true,

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .no_invd_sharing = true,

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .no_invd_sharing = true,

/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,

/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .complex_indexing = true,
/* TLB definitions: */
/* Hardcoded TLB geometry reported via the AMD CPUID leaves
 * 0x80000005 (L1) and 0x80000006 (L2). */

/* L1 data TLB, 2M/4M pages: */
#define L1_DTLB_2M_ASSOC 1
#define L1_DTLB_2M_ENTRIES 255
/* L1 data TLB, 4K pages: */
#define L1_DTLB_4K_ASSOC 1
#define L1_DTLB_4K_ENTRIES 255

/* L1 instruction TLB, 2M/4M pages: */
#define L1_ITLB_2M_ASSOC 1
#define L1_ITLB_2M_ENTRIES 255
/* L1 instruction TLB, 4K pages: */
#define L1_ITLB_4K_ASSOC 1
#define L1_ITLB_4K_ENTRIES 255

/* L2 data TLB, 2M/4M pages (not advertised): */
#define L2_DTLB_2M_ASSOC 0 /* disabled */
#define L2_DTLB_2M_ENTRIES 0 /* disabled */
/* L2 data TLB, 4K pages: */
#define L2_DTLB_4K_ASSOC 4
#define L2_DTLB_4K_ENTRIES 512

/* L2 instruction TLB, 2M/4M pages (not advertised): */
#define L2_ITLB_2M_ASSOC 0 /* disabled */
#define L2_ITLB_2M_ENTRIES 0 /* disabled */
/* L2 instruction TLB, 4K pages: */
#define L2_ITLB_4K_ASSOC 4
#define L2_ITLB_4K_ENTRIES 512
/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF 0x1
/*
 * CPUID[0x14, 0].EBX:
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 * MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 * of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX 0xf
/*
 * CPUID[0x14, 0].ECX:
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 * accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 * maximum allowed by the MaskOrTableOffset field of
 * IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX 0x7
/* generated packets which contain IP payloads have LIP values */
/* NOTE(review): (1 << 31) shifts into the sign bit of int; (1u << 31)
 * would avoid implementation-defined behavior — confirm intended. */
#define INTEL_PT_IP_LIP (1 << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
500 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
501 uint32_t vendor2, uint32_t vendor3)
504 for (i = 0; i < 4; i++) {
505 dst[i] = vendor1 >> (8 * i);
506 dst[i + 4] = vendor2 >> (8 * i);
507 dst[i + 8] = vendor3 >> (8 * i);
509 dst[CPUID_VENDOR_SZ] = '\0';
/* Cumulative feature-flag baselines for the classic Intel CPU models;
 * each model builds on the previous one's CPUID[1].EDX set. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
/* P6/Pentium Pro baseline used by several qemu64/qemu32-style models */
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
524 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
525 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
526 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
527 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
528 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
529 /* partly implemented:
530 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
532 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
533 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
534 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
535 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
536 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
537 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
539 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
540 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
541 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
542 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
543 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
546 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
548 #define TCG_EXT2_X86_64_FEATURES 0
551 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
552 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
553 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
554 TCG_EXT2_X86_64_FEATURES)
555 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
556 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
557 #define TCG_EXT4_FEATURES 0
558 #define TCG_SVM_FEATURES 0
559 #define TCG_KVM_FEATURES 0
560 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
561 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
562 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
563 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
566 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
567 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
568 CPUID_7_0_EBX_RDSEED */
569 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
571 #define TCG_7_0_EDX_FEATURES 0
572 #define TCG_APM_FEATURES 0
573 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
574 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
576 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Describes one CPUID feature word: its bit names, the CPUID leaf/register
 * it lives in, and its TCG/migration capability masks.
 * NOTE(review): the closing "} FeatureWordInfo;" line is not visible in
 * this excerpt. */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax; /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx; /* Input ECX value for CPUID */
    int cpuid_reg; /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
596 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
599 "fpu", "vme", "de", "pse",
600 "tsc", "msr", "pae", "mce",
601 "cx8", "apic", NULL, "sep",
602 "mtrr", "pge", "mca", "cmov",
603 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
604 NULL, "ds" /* Intel dts */, "acpi", "mmx",
605 "fxsr", "sse", "sse2", "ss",
606 "ht" /* Intel htt */, "tm", "ia64", "pbe",
608 .cpuid_eax = 1, .cpuid_reg = R_EDX,
609 .tcg_features = TCG_FEATURES,
613 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
614 "ds-cpl", "vmx", "smx", "est",
615 "tm2", "ssse3", "cid", NULL,
616 "fma", "cx16", "xtpr", "pdcm",
617 NULL, "pcid", "dca", "sse4.1",
618 "sse4.2", "x2apic", "movbe", "popcnt",
619 "tsc-deadline", "aes", "xsave", "osxsave",
620 "avx", "f16c", "rdrand", "hypervisor",
622 .cpuid_eax = 1, .cpuid_reg = R_ECX,
623 .tcg_features = TCG_EXT_FEATURES,
625 /* Feature names that are already defined on feature_name[] but
626 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
627 * names on feat_names below. They are copied automatically
628 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
630 [FEAT_8000_0001_EDX] = {
632 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
633 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
634 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
635 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
636 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
637 "nx", NULL, "mmxext", NULL /* mmx */,
638 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
639 NULL, "lm", "3dnowext", "3dnow",
641 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
642 .tcg_features = TCG_EXT2_FEATURES,
644 [FEAT_8000_0001_ECX] = {
646 "lahf-lm", "cmp-legacy", "svm", "extapic",
647 "cr8legacy", "abm", "sse4a", "misalignsse",
648 "3dnowprefetch", "osvw", "ibs", "xop",
649 "skinit", "wdt", NULL, "lwp",
650 "fma4", "tce", NULL, "nodeid-msr",
651 NULL, "tbm", "topoext", "perfctr-core",
652 "perfctr-nb", NULL, NULL, NULL,
653 NULL, NULL, NULL, NULL,
655 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
656 .tcg_features = TCG_EXT3_FEATURES,
658 [FEAT_C000_0001_EDX] = {
660 NULL, NULL, "xstore", "xstore-en",
661 NULL, NULL, "xcrypt", "xcrypt-en",
662 "ace2", "ace2-en", "phe", "phe-en",
663 "pmm", "pmm-en", NULL, NULL,
664 NULL, NULL, NULL, NULL,
665 NULL, NULL, NULL, NULL,
666 NULL, NULL, NULL, NULL,
667 NULL, NULL, NULL, NULL,
669 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
670 .tcg_features = TCG_EXT4_FEATURES,
674 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
675 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
676 NULL, "kvm-pv-tlb-flush", NULL, NULL,
677 NULL, NULL, NULL, NULL,
678 NULL, NULL, NULL, NULL,
679 NULL, NULL, NULL, NULL,
680 "kvmclock-stable-bit", NULL, NULL, NULL,
681 NULL, NULL, NULL, NULL,
683 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
684 .tcg_features = TCG_KVM_FEATURES,
688 "kvm-hint-dedicated", NULL, NULL, NULL,
689 NULL, NULL, NULL, NULL,
690 NULL, NULL, NULL, NULL,
691 NULL, NULL, NULL, NULL,
692 NULL, NULL, NULL, NULL,
693 NULL, NULL, NULL, NULL,
694 NULL, NULL, NULL, NULL,
695 NULL, NULL, NULL, NULL,
697 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
698 .tcg_features = TCG_KVM_FEATURES,
700 * KVM hints aren't auto-enabled by -cpu host, they need to be
701 * explicitly enabled in the command-line.
703 .no_autoenable_flags = ~0U,
705 [FEAT_HYPERV_EAX] = {
707 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
708 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
709 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
710 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
711 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
712 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
713 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
715 NULL, NULL, NULL, NULL,
716 NULL, NULL, NULL, NULL,
717 NULL, NULL, NULL, NULL,
718 NULL, NULL, NULL, NULL,
720 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
722 [FEAT_HYPERV_EBX] = {
724 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
725 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
726 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
727 NULL /* hv_create_port */, NULL /* hv_connect_port */,
728 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
729 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
731 NULL, NULL, NULL, NULL,
732 NULL, NULL, NULL, NULL,
733 NULL, NULL, NULL, NULL,
734 NULL, NULL, NULL, NULL,
736 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
738 [FEAT_HYPERV_EDX] = {
740 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
741 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
742 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
744 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
745 NULL, NULL, NULL, NULL,
746 NULL, NULL, NULL, NULL,
747 NULL, NULL, NULL, NULL,
748 NULL, NULL, NULL, NULL,
749 NULL, NULL, NULL, NULL,
751 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
755 "npt", "lbrv", "svm-lock", "nrip-save",
756 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
757 NULL, NULL, "pause-filter", NULL,
758 "pfthreshold", NULL, NULL, NULL,
759 NULL, NULL, NULL, NULL,
760 NULL, NULL, NULL, NULL,
761 NULL, NULL, NULL, NULL,
762 NULL, NULL, NULL, NULL,
764 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
765 .tcg_features = TCG_SVM_FEATURES,
769 "fsgsbase", "tsc-adjust", NULL, "bmi1",
770 "hle", "avx2", NULL, "smep",
771 "bmi2", "erms", "invpcid", "rtm",
772 NULL, NULL, "mpx", NULL,
773 "avx512f", "avx512dq", "rdseed", "adx",
774 "smap", "avx512ifma", "pcommit", "clflushopt",
775 "clwb", "intel-pt", "avx512pf", "avx512er",
776 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
779 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
781 .tcg_features = TCG_7_0_EBX_FEATURES,
785 NULL, "avx512vbmi", "umip", "pku",
786 "ospke", NULL, "avx512vbmi2", NULL,
787 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
788 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
789 "la57", NULL, NULL, NULL,
790 NULL, NULL, "rdpid", NULL,
791 NULL, "cldemote", NULL, NULL,
792 NULL, NULL, NULL, NULL,
795 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
797 .tcg_features = TCG_7_0_ECX_FEATURES,
801 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
802 NULL, NULL, NULL, NULL,
803 NULL, NULL, NULL, NULL,
804 NULL, NULL, NULL, NULL,
805 NULL, NULL, NULL, NULL,
806 NULL, NULL, NULL, NULL,
807 NULL, NULL, "spec-ctrl", NULL,
808 NULL, NULL, NULL, NULL,
811 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
813 .tcg_features = TCG_7_0_EDX_FEATURES,
815 [FEAT_8000_0007_EDX] = {
817 NULL, NULL, NULL, NULL,
818 NULL, NULL, NULL, NULL,
819 "invtsc", NULL, NULL, NULL,
820 NULL, NULL, NULL, NULL,
821 NULL, NULL, NULL, NULL,
822 NULL, NULL, NULL, NULL,
823 NULL, NULL, NULL, NULL,
824 NULL, NULL, NULL, NULL,
826 .cpuid_eax = 0x80000007,
828 .tcg_features = TCG_APM_FEATURES,
829 .unmigratable_flags = CPUID_APM_INVTSC,
831 [FEAT_8000_0008_EBX] = {
833 NULL, NULL, NULL, NULL,
834 NULL, NULL, NULL, NULL,
835 NULL, NULL, NULL, NULL,
836 "ibpb", NULL, NULL, NULL,
837 NULL, NULL, NULL, NULL,
838 NULL, NULL, NULL, NULL,
839 NULL, NULL, NULL, NULL,
840 NULL, NULL, NULL, NULL,
842 .cpuid_eax = 0x80000008,
845 .unmigratable_flags = 0,
849 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
850 NULL, NULL, NULL, NULL,
851 NULL, NULL, NULL, NULL,
852 NULL, NULL, NULL, NULL,
853 NULL, NULL, NULL, NULL,
854 NULL, NULL, NULL, NULL,
855 NULL, NULL, NULL, NULL,
856 NULL, NULL, NULL, NULL,
859 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
861 .tcg_features = TCG_XSAVE_FEATURES,
865 NULL, NULL, "arat", NULL,
866 NULL, NULL, NULL, NULL,
867 NULL, NULL, NULL, NULL,
868 NULL, NULL, NULL, NULL,
869 NULL, NULL, NULL, NULL,
870 NULL, NULL, NULL, NULL,
871 NULL, NULL, NULL, NULL,
872 NULL, NULL, NULL, NULL,
874 .cpuid_eax = 6, .cpuid_reg = R_EAX,
875 .tcg_features = TCG_6_EAX_FEATURES,
877 [FEAT_XSAVE_COMP_LO] = {
879 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
882 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
883 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
884 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
887 [FEAT_XSAVE_COMP_HI] = {
889 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
/* Mapping from a 32-bit CPU register index (R_* constant) to its name
 * and QAPI enum value. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    /* NOTE(review): the `const char *name;` field line is not visible in
     * this excerpt but is referenced by the REGISTER() macro below —
     * confirm against the full file. */
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;

/* Build one x86_reg_info_32[] entry from a register token,
 * e.g. REGISTER(EAX). */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {

/* Describes one XSAVE state component: the CPUID feature that gates it
 * and the component's offset/size within the XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
/* Table of XSAVE state components, indexed by XSTATE_*_BIT.
 * NOTE(review): the [XSTATE_FP_BIT]/[XSTATE_SSE_BIT]/[XSTATE_PKRU_BIT]
 * designators and the closing "};" are not visible in this excerpt. */
static const ExtSaveArea x86_ext_save_areas[] = {
    /* x87 FP state component is always enabled if XSAVE is supported */
    .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
    /* x87 state is in the legacy region of the XSAVE area */
    .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    /* SSE state component is always enabled if XSAVE is supported */
    .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
    /* SSE state is in the legacy region of the XSAVE area */
    .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    /* AVX (YMM upper halves), gated on CPUID[1].ECX.AVX */
    { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
      .offset = offsetof(X86XSaveArea, avx_state),
      .size = sizeof(XSaveAVX) },
    /* MPX bound registers */
    [XSTATE_BNDREGS_BIT] =
    { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
      .offset = offsetof(X86XSaveArea, bndreg_state),
      .size = sizeof(XSaveBNDREG) },
    /* MPX bound config/status */
    [XSTATE_BNDCSR_BIT] =
    { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
      .offset = offsetof(X86XSaveArea, bndcsr_state),
      .size = sizeof(XSaveBNDCSR) },
    /* AVX-512 opmask registers */
    [XSTATE_OPMASK_BIT] =
    { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
      .offset = offsetof(X86XSaveArea, opmask_state),
      .size = sizeof(XSaveOpmask) },
    /* AVX-512 upper 256 bits of ZMM0-15 */
    [XSTATE_ZMM_Hi256_BIT] =
    { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
      .offset = offsetof(X86XSaveArea, zmm_hi256_state),
      .size = sizeof(XSaveZMM_Hi256) },
    /* AVX-512 ZMM16-31 */
    [XSTATE_Hi16_ZMM_BIT] =
    { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
      .offset = offsetof(X86XSaveArea, hi16_zmm_state),
      .size = sizeof(XSaveHi16_ZMM) },
    /* Protection-key rights register */
    { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
      .offset = offsetof(X86XSaveArea, pkru_state),
      .size = sizeof(XSavePKRU) },
966 static uint32_t xsave_area_size(uint64_t mask)
971 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
972 const ExtSaveArea *esa = &x86_ext_save_areas[i];
973 if ((mask >> i) & 1) {
974 ret = MAX(ret, esa->offset + esa->size);
980 static inline bool accel_uses_host_cpuid(void)
982 return kvm_enabled() || hvf_enabled();
985 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
987 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
988 cpu->env.features[FEAT_XSAVE_COMP_LO];
991 const char *get_register_name_32(unsigned int reg)
993 if (reg >= CPU_NB_REGS32) {
996 return x86_reg_info_32[reg].name;
1000 * Returns the set of feature flags that are supported and migratable by
1001 * QEMU, for a given FeatureWord.
1003 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1005 FeatureWordInfo *wi = &feature_word_info[w];
1009 for (i = 0; i < 32; i++) {
1010 uint32_t f = 1U << i;
1012 /* If the feature name is known, it is implicitly considered migratable,
1013 * unless it is explicitly set in unmigratable_flags */
1014 if ((wi->migratable_flags & f) ||
1015 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
/*
 * Execute the CPUID instruction on the host with the given leaf/subleaf
 * and return the four output registers.
 * NOTE(review): this excerpt omits the x86_64 #ifdef arm, the declaration
 * of `vec`, the tail of the i386 asm and the code copying vec[] into the
 * output pointers — confirm against the full file.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* i386 path: preserve all GPRs with pusha and store the CPUID results
     * through the pointer passed in %esi (the "S" constraint on vec). */
    asm volatile("pusha \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 : : "a"(function), "c"(count), "S"(vec)
1056 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1058 uint32_t eax, ebx, ecx, edx;
1060 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1061 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1063 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1065 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1068 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1071 *stepping = eax & 0x0F;
/* CPU class name definitions: */

/* Return the QOM type name for a given CPU model name.
 * Caller is responsible for freeing the returned string (g_free).
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
/*
 * Resolve the ObjectClass for CPU model @cpu_model by converting the
 * model name to its QOM type name and looking that class up.
 * NOTE(review): this excerpt is missing lines — the declaration of 'oc',
 * the freeing of 'typename', and the return statement are not visible;
 * confirm against the full file.
 */
1085 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1088 char *typename = x86_cpu_type_name(cpu_model);
1089 oc = object_class_by_name(typename);
1094 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1096 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1097 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1098 return g_strndup(class_name,
1099 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
/*
 * Static definition of a built-in CPU model, used to populate the
 * builtin_x86_defs[] table below.
 * NOTE(review): several members of this struct are not visible in this
 * excerpt; confirm against the full file before editing.
 */
1102 struct X86CPUDefinition {
1106 /* vendor is zero-terminated, 12 character ASCII string */
1107 char vendor[CPUID_VENDOR_SZ + 1];
/* CPUID feature flags, one word per FeatureWord value */
1111 FeatureWordArray features;
/* Human-readable model name string (e.g. "QEMU Virtual CPU ...") */
1112 const char *model_id;
/* Per-model cache properties — presumably NULL selects defaults; verify */
1113 CPUCaches *cache_info;
/*
 * Cache properties advertised by the AMD EPYC CPU model (referenced via
 * X86CPUDefinition.cache_info).
 * NOTE(review): most of the per-level cache descriptor initializers are
 * missing from this excerpt; edit only with the full source at hand.
 */
1116 static CPUCaches epyc_cache_info = {
1127 .no_invd_sharing = true,
1139 .no_invd_sharing = true,
1142 .type = UNIFIED_CACHE,
1152 .type = UNIFIED_CACHE,
1156 .associativity = 16,
1162 .complex_indexing = true,
1166 static X86CPUDefinition builtin_x86_defs[] = {
1170 .vendor = CPUID_VENDOR_AMD,
1174 .features[FEAT_1_EDX] =
1176 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1178 .features[FEAT_1_ECX] =
1179 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1180 .features[FEAT_8000_0001_EDX] =
1181 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1182 .features[FEAT_8000_0001_ECX] =
1183 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1184 .xlevel = 0x8000000A,
1185 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1190 .vendor = CPUID_VENDOR_AMD,
1194 /* Missing: CPUID_HT */
1195 .features[FEAT_1_EDX] =
1197 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1198 CPUID_PSE36 | CPUID_VME,
1199 .features[FEAT_1_ECX] =
1200 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1202 .features[FEAT_8000_0001_EDX] =
1203 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1204 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1205 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1206 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1208 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1209 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1210 .features[FEAT_8000_0001_ECX] =
1211 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1212 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1213 /* Missing: CPUID_SVM_LBRV */
1214 .features[FEAT_SVM] =
1216 .xlevel = 0x8000001A,
1217 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1222 .vendor = CPUID_VENDOR_INTEL,
1226 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1227 .features[FEAT_1_EDX] =
1229 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1230 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1231 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1232 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1233 .features[FEAT_1_ECX] =
1234 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1236 .features[FEAT_8000_0001_EDX] =
1237 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1238 .features[FEAT_8000_0001_ECX] =
1240 .xlevel = 0x80000008,
1241 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1246 .vendor = CPUID_VENDOR_INTEL,
1250 /* Missing: CPUID_HT */
1251 .features[FEAT_1_EDX] =
1252 PPRO_FEATURES | CPUID_VME |
1253 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1255 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1256 .features[FEAT_1_ECX] =
1257 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1258 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1259 .features[FEAT_8000_0001_EDX] =
1260 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1261 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1262 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1263 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1264 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1265 .features[FEAT_8000_0001_ECX] =
1267 .xlevel = 0x80000008,
1268 .model_id = "Common KVM processor"
1273 .vendor = CPUID_VENDOR_INTEL,
1277 .features[FEAT_1_EDX] =
1279 .features[FEAT_1_ECX] =
1281 .xlevel = 0x80000004,
1282 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1287 .vendor = CPUID_VENDOR_INTEL,
1291 .features[FEAT_1_EDX] =
1292 PPRO_FEATURES | CPUID_VME |
1293 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1294 .features[FEAT_1_ECX] =
1296 .features[FEAT_8000_0001_ECX] =
1298 .xlevel = 0x80000008,
1299 .model_id = "Common 32-bit KVM processor"
1304 .vendor = CPUID_VENDOR_INTEL,
1308 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1309 .features[FEAT_1_EDX] =
1310 PPRO_FEATURES | CPUID_VME |
1311 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1313 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1314 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1315 .features[FEAT_1_ECX] =
1316 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1317 .features[FEAT_8000_0001_EDX] =
1319 .xlevel = 0x80000008,
1320 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1325 .vendor = CPUID_VENDOR_INTEL,
1329 .features[FEAT_1_EDX] =
1337 .vendor = CPUID_VENDOR_INTEL,
1341 .features[FEAT_1_EDX] =
1349 .vendor = CPUID_VENDOR_INTEL,
1353 .features[FEAT_1_EDX] =
1361 .vendor = CPUID_VENDOR_INTEL,
1365 .features[FEAT_1_EDX] =
1373 .vendor = CPUID_VENDOR_AMD,
1377 .features[FEAT_1_EDX] =
1378 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1380 .features[FEAT_8000_0001_EDX] =
1381 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1382 .xlevel = 0x80000008,
1383 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1388 .vendor = CPUID_VENDOR_INTEL,
1392 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1393 .features[FEAT_1_EDX] =
1395 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1396 CPUID_ACPI | CPUID_SS,
1397 /* Some CPUs got no CPUID_SEP */
1398 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1400 .features[FEAT_1_ECX] =
1401 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1403 .features[FEAT_8000_0001_EDX] =
1405 .features[FEAT_8000_0001_ECX] =
1407 .xlevel = 0x80000008,
1408 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1413 .vendor = CPUID_VENDOR_INTEL,
1417 .features[FEAT_1_EDX] =
1418 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1419 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1420 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1421 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1422 CPUID_DE | CPUID_FP87,
1423 .features[FEAT_1_ECX] =
1424 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1425 .features[FEAT_8000_0001_EDX] =
1426 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1427 .features[FEAT_8000_0001_ECX] =
1429 .xlevel = 0x80000008,
1430 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1435 .vendor = CPUID_VENDOR_INTEL,
1439 .features[FEAT_1_EDX] =
1440 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1441 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1442 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1443 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1444 CPUID_DE | CPUID_FP87,
1445 .features[FEAT_1_ECX] =
1446 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1448 .features[FEAT_8000_0001_EDX] =
1449 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1450 .features[FEAT_8000_0001_ECX] =
1452 .xlevel = 0x80000008,
1453 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1458 .vendor = CPUID_VENDOR_INTEL,
1462 .features[FEAT_1_EDX] =
1463 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1464 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1465 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1466 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1467 CPUID_DE | CPUID_FP87,
1468 .features[FEAT_1_ECX] =
1469 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1470 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1471 .features[FEAT_8000_0001_EDX] =
1472 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1473 .features[FEAT_8000_0001_ECX] =
1475 .xlevel = 0x80000008,
1476 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1479 .name = "Nehalem-IBRS",
1481 .vendor = CPUID_VENDOR_INTEL,
1485 .features[FEAT_1_EDX] =
1486 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1487 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1488 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1489 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1490 CPUID_DE | CPUID_FP87,
1491 .features[FEAT_1_ECX] =
1492 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1493 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1494 .features[FEAT_7_0_EDX] =
1495 CPUID_7_0_EDX_SPEC_CTRL,
1496 .features[FEAT_8000_0001_EDX] =
1497 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1498 .features[FEAT_8000_0001_ECX] =
1500 .xlevel = 0x80000008,
1501 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1506 .vendor = CPUID_VENDOR_INTEL,
1510 .features[FEAT_1_EDX] =
1511 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1512 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1513 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1514 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1515 CPUID_DE | CPUID_FP87,
1516 .features[FEAT_1_ECX] =
1517 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1518 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1519 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1520 .features[FEAT_8000_0001_EDX] =
1521 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1522 .features[FEAT_8000_0001_ECX] =
1524 .features[FEAT_6_EAX] =
1526 .xlevel = 0x80000008,
1527 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1530 .name = "Westmere-IBRS",
1532 .vendor = CPUID_VENDOR_INTEL,
1536 .features[FEAT_1_EDX] =
1537 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1538 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1539 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1540 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1541 CPUID_DE | CPUID_FP87,
1542 .features[FEAT_1_ECX] =
1543 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1544 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1545 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1546 .features[FEAT_8000_0001_EDX] =
1547 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1548 .features[FEAT_8000_0001_ECX] =
1550 .features[FEAT_7_0_EDX] =
1551 CPUID_7_0_EDX_SPEC_CTRL,
1552 .features[FEAT_6_EAX] =
1554 .xlevel = 0x80000008,
1555 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1558 .name = "SandyBridge",
1560 .vendor = CPUID_VENDOR_INTEL,
1564 .features[FEAT_1_EDX] =
1565 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1566 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1567 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1568 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1569 CPUID_DE | CPUID_FP87,
1570 .features[FEAT_1_ECX] =
1571 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1572 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1573 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1574 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1576 .features[FEAT_8000_0001_EDX] =
1577 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1579 .features[FEAT_8000_0001_ECX] =
1581 .features[FEAT_XSAVE] =
1582 CPUID_XSAVE_XSAVEOPT,
1583 .features[FEAT_6_EAX] =
1585 .xlevel = 0x80000008,
1586 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1589 .name = "SandyBridge-IBRS",
1591 .vendor = CPUID_VENDOR_INTEL,
1595 .features[FEAT_1_EDX] =
1596 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1597 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1598 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1599 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1600 CPUID_DE | CPUID_FP87,
1601 .features[FEAT_1_ECX] =
1602 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1603 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1604 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1605 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1607 .features[FEAT_8000_0001_EDX] =
1608 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1610 .features[FEAT_8000_0001_ECX] =
1612 .features[FEAT_7_0_EDX] =
1613 CPUID_7_0_EDX_SPEC_CTRL,
1614 .features[FEAT_XSAVE] =
1615 CPUID_XSAVE_XSAVEOPT,
1616 .features[FEAT_6_EAX] =
1618 .xlevel = 0x80000008,
1619 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1622 .name = "IvyBridge",
1624 .vendor = CPUID_VENDOR_INTEL,
1628 .features[FEAT_1_EDX] =
1629 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1630 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1631 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1632 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1633 CPUID_DE | CPUID_FP87,
1634 .features[FEAT_1_ECX] =
1635 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1636 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1637 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1638 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1639 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1640 .features[FEAT_7_0_EBX] =
1641 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1643 .features[FEAT_8000_0001_EDX] =
1644 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1646 .features[FEAT_8000_0001_ECX] =
1648 .features[FEAT_XSAVE] =
1649 CPUID_XSAVE_XSAVEOPT,
1650 .features[FEAT_6_EAX] =
1652 .xlevel = 0x80000008,
1653 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1656 .name = "IvyBridge-IBRS",
1658 .vendor = CPUID_VENDOR_INTEL,
1662 .features[FEAT_1_EDX] =
1663 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1664 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1665 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1666 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1667 CPUID_DE | CPUID_FP87,
1668 .features[FEAT_1_ECX] =
1669 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1670 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1671 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1672 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1673 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1674 .features[FEAT_7_0_EBX] =
1675 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1677 .features[FEAT_8000_0001_EDX] =
1678 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1680 .features[FEAT_8000_0001_ECX] =
1682 .features[FEAT_7_0_EDX] =
1683 CPUID_7_0_EDX_SPEC_CTRL,
1684 .features[FEAT_XSAVE] =
1685 CPUID_XSAVE_XSAVEOPT,
1686 .features[FEAT_6_EAX] =
1688 .xlevel = 0x80000008,
1689 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1692 .name = "Haswell-noTSX",
1694 .vendor = CPUID_VENDOR_INTEL,
1698 .features[FEAT_1_EDX] =
1699 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1700 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1701 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1702 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1703 CPUID_DE | CPUID_FP87,
1704 .features[FEAT_1_ECX] =
1705 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1706 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1707 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1708 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1709 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1710 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1711 .features[FEAT_8000_0001_EDX] =
1712 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1714 .features[FEAT_8000_0001_ECX] =
1715 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1716 .features[FEAT_7_0_EBX] =
1717 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1718 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1719 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1720 .features[FEAT_XSAVE] =
1721 CPUID_XSAVE_XSAVEOPT,
1722 .features[FEAT_6_EAX] =
1724 .xlevel = 0x80000008,
1725 .model_id = "Intel Core Processor (Haswell, no TSX)",
1728 .name = "Haswell-noTSX-IBRS",
1730 .vendor = CPUID_VENDOR_INTEL,
1734 .features[FEAT_1_EDX] =
1735 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1736 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1737 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1738 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1739 CPUID_DE | CPUID_FP87,
1740 .features[FEAT_1_ECX] =
1741 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1742 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1743 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1744 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1745 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1746 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1747 .features[FEAT_8000_0001_EDX] =
1748 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1750 .features[FEAT_8000_0001_ECX] =
1751 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1752 .features[FEAT_7_0_EDX] =
1753 CPUID_7_0_EDX_SPEC_CTRL,
1754 .features[FEAT_7_0_EBX] =
1755 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1756 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1757 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1758 .features[FEAT_XSAVE] =
1759 CPUID_XSAVE_XSAVEOPT,
1760 .features[FEAT_6_EAX] =
1762 .xlevel = 0x80000008,
1763 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1768 .vendor = CPUID_VENDOR_INTEL,
1772 .features[FEAT_1_EDX] =
1773 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1774 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1775 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1776 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1777 CPUID_DE | CPUID_FP87,
1778 .features[FEAT_1_ECX] =
1779 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1780 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1781 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1782 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1783 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1784 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1785 .features[FEAT_8000_0001_EDX] =
1786 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1788 .features[FEAT_8000_0001_ECX] =
1789 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1790 .features[FEAT_7_0_EBX] =
1791 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1792 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1793 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1795 .features[FEAT_XSAVE] =
1796 CPUID_XSAVE_XSAVEOPT,
1797 .features[FEAT_6_EAX] =
1799 .xlevel = 0x80000008,
1800 .model_id = "Intel Core Processor (Haswell)",
1803 .name = "Haswell-IBRS",
1805 .vendor = CPUID_VENDOR_INTEL,
1809 .features[FEAT_1_EDX] =
1810 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1811 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1812 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1813 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1814 CPUID_DE | CPUID_FP87,
1815 .features[FEAT_1_ECX] =
1816 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1817 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1818 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1819 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1820 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1821 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1822 .features[FEAT_8000_0001_EDX] =
1823 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1825 .features[FEAT_8000_0001_ECX] =
1826 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1827 .features[FEAT_7_0_EDX] =
1828 CPUID_7_0_EDX_SPEC_CTRL,
1829 .features[FEAT_7_0_EBX] =
1830 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1831 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1832 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1834 .features[FEAT_XSAVE] =
1835 CPUID_XSAVE_XSAVEOPT,
1836 .features[FEAT_6_EAX] =
1838 .xlevel = 0x80000008,
1839 .model_id = "Intel Core Processor (Haswell, IBRS)",
1842 .name = "Broadwell-noTSX",
1844 .vendor = CPUID_VENDOR_INTEL,
1848 .features[FEAT_1_EDX] =
1849 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1850 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1851 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1852 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1853 CPUID_DE | CPUID_FP87,
1854 .features[FEAT_1_ECX] =
1855 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1856 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1857 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1858 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1859 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1860 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1861 .features[FEAT_8000_0001_EDX] =
1862 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1864 .features[FEAT_8000_0001_ECX] =
1865 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1866 .features[FEAT_7_0_EBX] =
1867 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1868 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1869 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1870 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1872 .features[FEAT_XSAVE] =
1873 CPUID_XSAVE_XSAVEOPT,
1874 .features[FEAT_6_EAX] =
1876 .xlevel = 0x80000008,
1877 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1880 .name = "Broadwell-noTSX-IBRS",
1882 .vendor = CPUID_VENDOR_INTEL,
1886 .features[FEAT_1_EDX] =
1887 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1888 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1889 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1890 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1891 CPUID_DE | CPUID_FP87,
1892 .features[FEAT_1_ECX] =
1893 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1894 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1895 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1896 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1897 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1898 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1899 .features[FEAT_8000_0001_EDX] =
1900 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1902 .features[FEAT_8000_0001_ECX] =
1903 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1904 .features[FEAT_7_0_EDX] =
1905 CPUID_7_0_EDX_SPEC_CTRL,
1906 .features[FEAT_7_0_EBX] =
1907 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1908 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1909 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1910 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1912 .features[FEAT_XSAVE] =
1913 CPUID_XSAVE_XSAVEOPT,
1914 .features[FEAT_6_EAX] =
1916 .xlevel = 0x80000008,
1917 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
1920 .name = "Broadwell",
1922 .vendor = CPUID_VENDOR_INTEL,
1926 .features[FEAT_1_EDX] =
1927 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1928 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1929 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1930 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1931 CPUID_DE | CPUID_FP87,
1932 .features[FEAT_1_ECX] =
1933 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1934 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1935 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1936 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1937 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1938 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1939 .features[FEAT_8000_0001_EDX] =
1940 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1942 .features[FEAT_8000_0001_ECX] =
1943 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1944 .features[FEAT_7_0_EBX] =
1945 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1946 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1947 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1948 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1950 .features[FEAT_XSAVE] =
1951 CPUID_XSAVE_XSAVEOPT,
1952 .features[FEAT_6_EAX] =
1954 .xlevel = 0x80000008,
1955 .model_id = "Intel Core Processor (Broadwell)",
1958 .name = "Broadwell-IBRS",
1960 .vendor = CPUID_VENDOR_INTEL,
1964 .features[FEAT_1_EDX] =
1965 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1966 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1967 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1968 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1969 CPUID_DE | CPUID_FP87,
1970 .features[FEAT_1_ECX] =
1971 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1972 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1973 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1974 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1975 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1976 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1977 .features[FEAT_8000_0001_EDX] =
1978 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1980 .features[FEAT_8000_0001_ECX] =
1981 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1982 .features[FEAT_7_0_EDX] =
1983 CPUID_7_0_EDX_SPEC_CTRL,
1984 .features[FEAT_7_0_EBX] =
1985 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1986 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1987 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1988 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1990 .features[FEAT_XSAVE] =
1991 CPUID_XSAVE_XSAVEOPT,
1992 .features[FEAT_6_EAX] =
1994 .xlevel = 0x80000008,
1995 .model_id = "Intel Core Processor (Broadwell, IBRS)",
1998 .name = "Skylake-Client",
2000 .vendor = CPUID_VENDOR_INTEL,
2004 .features[FEAT_1_EDX] =
2005 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2006 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2007 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2008 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2009 CPUID_DE | CPUID_FP87,
2010 .features[FEAT_1_ECX] =
2011 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2012 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2013 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2014 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2015 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2016 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2017 .features[FEAT_8000_0001_EDX] =
2018 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2020 .features[FEAT_8000_0001_ECX] =
2021 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2022 .features[FEAT_7_0_EBX] =
2023 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2024 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2025 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2026 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2027 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2028 /* Missing: XSAVES (not supported by some Linux versions,
2029 * including v4.1 to v4.12).
2030 * KVM doesn't yet expose any XSAVES state save component,
2031 * and the only one defined in Skylake (processor tracing)
2032 * probably will block migration anyway.
2034 .features[FEAT_XSAVE] =
2035 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2036 CPUID_XSAVE_XGETBV1,
2037 .features[FEAT_6_EAX] =
2039 .xlevel = 0x80000008,
2040 .model_id = "Intel Core Processor (Skylake)",
2043 .name = "Skylake-Client-IBRS",
2045 .vendor = CPUID_VENDOR_INTEL,
2049 .features[FEAT_1_EDX] =
2050 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2051 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2052 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2053 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2054 CPUID_DE | CPUID_FP87,
2055 .features[FEAT_1_ECX] =
2056 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2057 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2058 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2059 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2060 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2061 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2062 .features[FEAT_8000_0001_EDX] =
2063 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2065 .features[FEAT_8000_0001_ECX] =
2066 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2067 .features[FEAT_7_0_EDX] =
2068 CPUID_7_0_EDX_SPEC_CTRL,
2069 .features[FEAT_7_0_EBX] =
2070 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2071 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2072 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2073 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2074 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2075 /* Missing: XSAVES (not supported by some Linux versions,
2076 * including v4.1 to v4.12).
2077 * KVM doesn't yet expose any XSAVES state save component,
2078 * and the only one defined in Skylake (processor tracing)
2079 * probably will block migration anyway.
2081 .features[FEAT_XSAVE] =
2082 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2083 CPUID_XSAVE_XGETBV1,
2084 .features[FEAT_6_EAX] =
2086 .xlevel = 0x80000008,
2087 .model_id = "Intel Core Processor (Skylake, IBRS)",
2090 .name = "Skylake-Server",
2092 .vendor = CPUID_VENDOR_INTEL,
2096 .features[FEAT_1_EDX] =
2097 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2098 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2099 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2100 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2101 CPUID_DE | CPUID_FP87,
2102 .features[FEAT_1_ECX] =
2103 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2104 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2105 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2106 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2107 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2108 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2109 .features[FEAT_8000_0001_EDX] =
2110 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2111 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2112 .features[FEAT_8000_0001_ECX] =
2113 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2114 .features[FEAT_7_0_EBX] =
2115 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2116 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2117 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2118 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2119 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2120 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2121 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2122 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2123 /* Missing: XSAVES (not supported by some Linux versions,
2124 * including v4.1 to v4.12).
2125 * KVM doesn't yet expose any XSAVES state save component,
2126 * and the only one defined in Skylake (processor tracing)
2127 * probably will block migration anyway.
2129 .features[FEAT_XSAVE] =
2130 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2131 CPUID_XSAVE_XGETBV1,
2132 .features[FEAT_6_EAX] =
2134 .xlevel = 0x80000008,
2135 .model_id = "Intel Xeon Processor (Skylake)",
2138 .name = "Skylake-Server-IBRS",
2140 .vendor = CPUID_VENDOR_INTEL,
2144 .features[FEAT_1_EDX] =
2145 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2146 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2147 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2148 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2149 CPUID_DE | CPUID_FP87,
2150 .features[FEAT_1_ECX] =
2151 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2152 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2153 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2154 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2155 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2156 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2157 .features[FEAT_8000_0001_EDX] =
2158 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2159 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2160 .features[FEAT_8000_0001_ECX] =
2161 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2162 .features[FEAT_7_0_EDX] =
2163 CPUID_7_0_EDX_SPEC_CTRL,
2164 .features[FEAT_7_0_EBX] =
2165 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2166 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2167 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2168 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2169 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2170 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2171 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2172 CPUID_7_0_EBX_AVX512VL,
2173 /* Missing: XSAVES (not supported by some Linux versions,
2174 * including v4.1 to v4.12).
2175 * KVM doesn't yet expose any XSAVES state save component,
2176 * and the only one defined in Skylake (processor tracing)
2177 * probably will block migration anyway.
2179 .features[FEAT_XSAVE] =
2180 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2181 CPUID_XSAVE_XGETBV1,
2182 .features[FEAT_6_EAX] =
2184 .xlevel = 0x80000008,
2185 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2188 .name = "KnightsMill",
2190 .vendor = CPUID_VENDOR_INTEL,
2194 .features[FEAT_1_EDX] =
2195 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2196 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2197 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2198 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2199 CPUID_PSE | CPUID_DE | CPUID_FP87,
2200 .features[FEAT_1_ECX] =
2201 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2202 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2203 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2204 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2205 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2206 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2207 .features[FEAT_8000_0001_EDX] =
2208 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2209 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2210 .features[FEAT_8000_0001_ECX] =
2211 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2212 .features[FEAT_7_0_EBX] =
2213 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2214 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2215 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2216 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2217 CPUID_7_0_EBX_AVX512ER,
2218 .features[FEAT_7_0_ECX] =
2219 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2220 .features[FEAT_7_0_EDX] =
2221 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2222 .features[FEAT_XSAVE] =
2223 CPUID_XSAVE_XSAVEOPT,
2224 .features[FEAT_6_EAX] =
2226 .xlevel = 0x80000008,
2227 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2230 .name = "Opteron_G1",
2232 .vendor = CPUID_VENDOR_AMD,
2236 .features[FEAT_1_EDX] =
2237 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2238 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2239 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2240 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2241 CPUID_DE | CPUID_FP87,
2242 .features[FEAT_1_ECX] =
2244 .features[FEAT_8000_0001_EDX] =
2245 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2246 .xlevel = 0x80000008,
2247 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2250 .name = "Opteron_G2",
2252 .vendor = CPUID_VENDOR_AMD,
2256 .features[FEAT_1_EDX] =
2257 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2258 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2259 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2260 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2261 CPUID_DE | CPUID_FP87,
2262 .features[FEAT_1_ECX] =
2263 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2264 /* Missing: CPUID_EXT2_RDTSCP */
2265 .features[FEAT_8000_0001_EDX] =
2266 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2267 .features[FEAT_8000_0001_ECX] =
2268 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2269 .xlevel = 0x80000008,
2270 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2273 .name = "Opteron_G3",
2275 .vendor = CPUID_VENDOR_AMD,
2279 .features[FEAT_1_EDX] =
2280 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2281 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2282 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2283 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2284 CPUID_DE | CPUID_FP87,
2285 .features[FEAT_1_ECX] =
2286 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2288 /* Missing: CPUID_EXT2_RDTSCP */
2289 .features[FEAT_8000_0001_EDX] =
2290 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2291 .features[FEAT_8000_0001_ECX] =
2292 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2293 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2294 .xlevel = 0x80000008,
2295 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2298 .name = "Opteron_G4",
2300 .vendor = CPUID_VENDOR_AMD,
2304 .features[FEAT_1_EDX] =
2305 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2306 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2307 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2308 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2309 CPUID_DE | CPUID_FP87,
2310 .features[FEAT_1_ECX] =
2311 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2312 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2313 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2315 /* Missing: CPUID_EXT2_RDTSCP */
2316 .features[FEAT_8000_0001_EDX] =
2317 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2319 .features[FEAT_8000_0001_ECX] =
2320 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2321 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2322 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2325 .xlevel = 0x8000001A,
2326 .model_id = "AMD Opteron 62xx class CPU",
2329 .name = "Opteron_G5",
2331 .vendor = CPUID_VENDOR_AMD,
2335 .features[FEAT_1_EDX] =
2336 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2337 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2338 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2339 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2340 CPUID_DE | CPUID_FP87,
2341 .features[FEAT_1_ECX] =
2342 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2343 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2344 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2345 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2346 /* Missing: CPUID_EXT2_RDTSCP */
2347 .features[FEAT_8000_0001_EDX] =
2348 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2350 .features[FEAT_8000_0001_ECX] =
2351 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2352 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2353 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2356 .xlevel = 0x8000001A,
2357 .model_id = "AMD Opteron 63xx class CPU",
2362 .vendor = CPUID_VENDOR_AMD,
2366 .features[FEAT_1_EDX] =
2367 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2368 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2369 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2370 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2371 CPUID_VME | CPUID_FP87,
2372 .features[FEAT_1_ECX] =
2373 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2374 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2375 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2376 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2377 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2378 .features[FEAT_8000_0001_EDX] =
2379 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2380 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2382 .features[FEAT_8000_0001_ECX] =
2383 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2384 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2385 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2386 .features[FEAT_7_0_EBX] =
2387 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2388 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2389 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2390 CPUID_7_0_EBX_SHA_NI,
2391 /* Missing: XSAVES (not supported by some Linux versions,
2392 * including v4.1 to v4.12).
2393 * KVM doesn't yet expose any XSAVES state save component.
2395 .features[FEAT_XSAVE] =
2396 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2397 CPUID_XSAVE_XGETBV1,
2398 .features[FEAT_6_EAX] =
2400 .xlevel = 0x8000000A,
2401 .model_id = "AMD EPYC Processor",
2402 .cache_info = &epyc_cache_info,
2405 .name = "EPYC-IBPB",
2407 .vendor = CPUID_VENDOR_AMD,
2411 .features[FEAT_1_EDX] =
2412 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2413 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2414 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2415 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2416 CPUID_VME | CPUID_FP87,
2417 .features[FEAT_1_ECX] =
2418 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2419 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2420 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2421 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2422 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2423 .features[FEAT_8000_0001_EDX] =
2424 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2425 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2427 .features[FEAT_8000_0001_ECX] =
2428 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2429 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2430 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2431 .features[FEAT_8000_0008_EBX] =
2432 CPUID_8000_0008_EBX_IBPB,
2433 .features[FEAT_7_0_EBX] =
2434 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2435 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2436 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2437 CPUID_7_0_EBX_SHA_NI,
2438 /* Missing: XSAVES (not supported by some Linux versions,
2439 * including v4.1 to v4.12).
2440 * KVM doesn't yet expose any XSAVES state save component.
2442 .features[FEAT_XSAVE] =
2443 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2444 CPUID_XSAVE_XGETBV1,
2445 .features[FEAT_6_EAX] =
2447 .xlevel = 0x8000000A,
2448 .model_id = "AMD EPYC Processor (with IBPB)",
2449 .cache_info = &epyc_cache_info,
/* Simple (property name, value) string pair used to apply accelerator-
 * specific default overrides to CPU model objects via QOM properties.
 * NOTE(review): extraction is lossy here — the closing of the typedef and
 * the array terminators/entries between the visible lines are elided. */
2453 typedef struct PropValue {
2454 const char *prop, *value;
2457 /* KVM-specific features that are automatically added/removed
2458 * from all CPU models when KVM is enabled.
2460 static PropValue kvm_default_props[] = {
2461 { "kvmclock", "on" },
2462 { "kvm-nopiodelay", "on" },
2463 { "kvm-asyncpf", "on" },
2464 { "kvm-steal-time", "on" },
2465 { "kvm-pv-eoi", "on" },
2466 { "kvmclock-stable-bit", "on" },
/* "monitor" is forced off under KVM; presumably MWAIT/MONITOR is not
 * exposed by default — confirm against the full table (entries elided). */
2469 { "monitor", "off" },
2474 /* TCG-specific defaults that override all CPU models when using TCG
2476 static PropValue tcg_default_props[] = {
/* Replace the default value of @prop in kvm_default_props with @value.
 * Only valid for properties already present in that table (see comment
 * below; the enforcing assert appears to be in elided lines). */
2482 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2485     for (pv = kvm_default_props; pv->prop; pv++) {
2486         if (!strcmp(pv->prop, prop)) {
2492     /* It is valid to call this function only for properties that
2493      * are already present in the kvm_default_props table.
/* Forward declaration; defined later in this file. */
2498 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2499                                                    bool migratable_only);
/* Query KVM for Local Machine Check Exception (LMCE) support.
 * NOTE(review): presumably guarded by kvm_enabled() in elided lines —
 * kvm_ioctl() is only meaningful with a live kvm_state. */
2501 static bool lmce_supported(void)
2503     uint64_t mce_cap = 0;
2506     if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2511     return !!(mce_cap & MCG_LMCE_P);
/* Model-ID string is 3 CPUID leaves x 16 bytes = 48 bytes. */
2514 #define CPUID_MODEL_ID_SZ 48
2517 * cpu_x86_fill_model_id:
2518 * Get CPUID model ID string from host CPU.
2520 * @str should have at least CPUID_MODEL_ID_SZ bytes
2522 * The function does NOT add a null terminator to the string
/* Reads host CPUID leaves 0x80000002..0x80000004 and copies the four
 * 32-bit registers of each leaf into consecutive 16-byte chunks of @str. */
2525 static int cpu_x86_fill_model_id(char *str)
2527     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2530     for (i = 0; i < 3; i++) {
2531         host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2532         memcpy(str + i * 16 + 0, &eax, 4);
2533         memcpy(str + i * 16 + 4, &ebx, 4);
2534         memcpy(str + i * 16 + 8, &ecx, 4);
2535         memcpy(str + i * 16 + 12, &edx, 4);
/* qdev properties specific to the "max" CPU model. */
2540 static Property max_x86_cpu_properties[] = {
2541     DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2542     DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2543     DEFINE_PROP_END_OF_LIST()
/* Class init for the "max" CPU model: sets the description and attaches
 * the properties above to the device class. */
2546 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2548     DeviceClass *dc = DEVICE_CLASS(oc);
2549     X86CPUClass *xcc = X86_CPU_CLASS(oc);
2553     xcc->model_description =
2554         "Enables all features supported by the accelerator in the current host";
2556     dc->props = max_x86_cpu_properties;
2559 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
/* Instance init for the "max" CPU model.
 * With a host-CPUID-capable accelerator (KVM/HVF), mirrors the host's
 * vendor/family/model/stepping/model-id and minimum CPUID levels into the
 * object; otherwise (the later branch, presumably TCG) installs a fixed
 * QEMU identity. Feature flags are NOT filled here because "migratable"
 * is not known yet (see comment below). */
2561 static void max_x86_cpu_initfn(Object *obj)
2563     X86CPU *cpu = X86_CPU(obj);
2564     CPUX86State *env = &cpu->env;
2565     KVMState *s = kvm_state;
2567     /* We can't fill the features array here because we don't know yet if
2568      * "migratable" is true or false.
2570     cpu->max_features = true;
2572     if (accel_uses_host_cpuid()) {
2573         char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2574         char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2575         int family, model, stepping;
2576         X86CPUDefinition host_cpudef = { };
2577         uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2579         host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2580         x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2582         host_vendor_fms(vendor, &family, &model, &stepping);
2584         cpu_x86_fill_model_id(model_id);
2586         object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2587         object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2588         object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2589         object_property_set_int(OBJECT(cpu), stepping, "stepping",
2591         object_property_set_str(OBJECT(cpu), model_id, "model-id",
/* Minimum CPUID levels come straight from the accelerator's supported
 * CPUID (leaf 0, 0x80000000, 0xC0000000). */
2594         if (kvm_enabled()) {
2595             env->cpuid_min_level =
2596                 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2597             env->cpuid_min_xlevel =
2598                 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2599             env->cpuid_min_xlevel2 =
2600                 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
/* NOTE(review): the else-branch header is elided; presumably hvf_enabled(). */
2602             env->cpuid_min_level =
2603                 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2604             env->cpuid_min_xlevel =
2605                 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2606             env->cpuid_min_xlevel2 =
2607                 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2610         if (lmce_supported()) {
2611             object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
/* Non-host-CPUID accelerator (presumably TCG): fixed AMD-flavoured
 * identity, family 6 / model 6 / stepping 3. */
2614         object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2615                                 "vendor", &error_abort);
2616         object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2617         object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2618         object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2619         object_property_set_str(OBJECT(cpu),
2620                                 "QEMU TCG CPU version " QEMU_HW_VERSION,
2621                                 "model-id", &error_abort);
2624     object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
/* QOM type registration for the "max" CPU model. */
2627 static const TypeInfo max_x86_cpu_type_info = {
2628     .name = X86_CPU_TYPE_NAME("max"),
2629     .parent = TYPE_X86_CPU,
2630     .instance_init = max_x86_cpu_initfn,
2631     .class_init = max_x86_cpu_class_init,
2634 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
/* Class init for the "host" CPU model: like "max" but requires a
 * host-CPUID-capable accelerator (KVM or HVF). */
2635 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2637     X86CPUClass *xcc = X86_CPU_CLASS(oc);
2639     xcc->host_cpuid_required = true;
2642     if (kvm_enabled()) {
2643         xcc->model_description =
2644             "KVM processor with all supported host features ";
2645     } else if (hvf_enabled()) {
2646         xcc->model_description =
2647             "HVF processor with all supported host features ";
/* "host" derives from "max", inheriting its instance_init. */
2651 static const TypeInfo host_x86_cpu_type_info = {
2652     .name = X86_CPU_TYPE_NAME("host"),
2653     .parent = X86_CPU_TYPE_NAME("max"),
2654     .class_init = host_x86_cpu_class_init,
/* Warn once per set bit in @mask about a requested CPUID feature the
 * current accelerator (host CPUID or TCG) cannot provide. */
2659 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2661     FeatureWordInfo *f = &feature_word_info[w];
2664     for (i = 0; i < 32; ++i) {
2665         if ((1UL << i) & mask) {
2666             const char *reg = get_register_name_32(f->cpuid_reg);
/* Falls back to the raw bit number when no feature name is known. */
2668             warn_report("%s doesn't support requested feature: "
2669                         "CPUID.%02XH:%s%s%s [bit %d]",
2670                         accel_uses_host_cpuid() ? "host" : "TCG",
2672                         f->feat_names[i] ? "." : "",
2673                         f->feat_names[i] ? f->feat_names[i] : "", i);
2678 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2679 const char *name, void *opaque,
2682 X86CPU *cpu = X86_CPU(obj);
2683 CPUX86State *env = &cpu->env;
2686 value = (env->cpuid_version >> 8) & 0xf;
2688 value += (env->cpuid_version >> 20) & 0xff;
2690 visit_type_int(v, name, &value, errp);
2693 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2694 const char *name, void *opaque,
2697 X86CPU *cpu = X86_CPU(obj);
2698 CPUX86State *env = &cpu->env;
2699 const int64_t min = 0;
2700 const int64_t max = 0xff + 0xf;
2701 Error *local_err = NULL;
2704 visit_type_int(v, name, &value, &local_err);
2706 error_propagate(errp, local_err);
2709 if (value < min || value > max) {
2710 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2711 name ? name : "null", value, min, max);
2715 env->cpuid_version &= ~0xff00f00;
2717 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2719 env->cpuid_version |= value << 8;
2723 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2724 const char *name, void *opaque,
2727 X86CPU *cpu = X86_CPU(obj);
2728 CPUX86State *env = &cpu->env;
2731 value = (env->cpuid_version >> 4) & 0xf;
2732 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2733 visit_type_int(v, name, &value, errp);
2736 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2737 const char *name, void *opaque,
2740 X86CPU *cpu = X86_CPU(obj);
2741 CPUX86State *env = &cpu->env;
2742 const int64_t min = 0;
2743 const int64_t max = 0xff;
2744 Error *local_err = NULL;
2747 visit_type_int(v, name, &value, &local_err);
2749 error_propagate(errp, local_err);
2752 if (value < min || value > max) {
2753 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2754 name ? name : "null", value, min, max);
2758 env->cpuid_version &= ~0xf00f0;
2759 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
2762 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2763 const char *name, void *opaque,
2766 X86CPU *cpu = X86_CPU(obj);
2767 CPUX86State *env = &cpu->env;
2770 value = env->cpuid_version & 0xf;
2771 visit_type_int(v, name, &value, errp);
2774 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2775 const char *name, void *opaque,
2778 X86CPU *cpu = X86_CPU(obj);
2779 CPUX86State *env = &cpu->env;
2780 const int64_t min = 0;
2781 const int64_t max = 0xf;
2782 Error *local_err = NULL;
2785 visit_type_int(v, name, &value, &local_err);
2787 error_propagate(errp, local_err);
2790 if (value < min || value > max) {
2791 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2792 name ? name : "null", value, min, max);
2796 env->cpuid_version &= ~0xf;
2797 env->cpuid_version |= value & 0xf;
/* Getter for the "vendor" property: decodes the three 32-bit vendor
 * registers into a 12-character string. Caller owns the returned buffer. */
2800 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2802     X86CPU *cpu = X86_CPU(obj);
2803     CPUX86State *env = &cpu->env;
2806     value = g_malloc(CPUID_VENDOR_SZ + 1);
2807     x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
2808                              env->cpuid_vendor3);
/* Setter for "vendor": the string must be exactly CPUID_VENDOR_SZ (12)
 * characters; 4 bytes are packed little-endian into each vendor word. */
2812 static void x86_cpuid_set_vendor(Object *obj, const char *value,
2815     X86CPU *cpu = X86_CPU(obj);
2816     CPUX86State *env = &cpu->env;
2819     if (strlen(value) != CPUID_VENDOR_SZ) {
2820         error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
2824     env->cpuid_vendor1 = 0;
2825     env->cpuid_vendor2 = 0;
2826     env->cpuid_vendor3 = 0;
2827     for (i = 0; i < 4; i++) {
2828         env->cpuid_vendor1 |= ((uint8_t)value[i    ]) << (8 * i);
2829         env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
2830         env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
/* Getter for "model-id": unpacks the 48-byte model string from the
 * cpuid_model word array. Caller owns the returned buffer. */
2834 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
2836     X86CPU *cpu = X86_CPU(obj);
2837     CPUX86State *env = &cpu->env;
2841     value = g_malloc(48 + 1);
2842     for (i = 0; i < 48; i++) {
2843         value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
/* Setter for "model-id": packs up to 48 bytes of @model_id into
 * env->cpuid_model, zero-padding the remainder.
 * NOTE(review): the handling past strlen (presumably padding with spaces
 * or NULs) is in elided lines — confirm before relying on it. */
2849 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
2852     X86CPU *cpu = X86_CPU(obj);
2853     CPUX86State *env = &cpu->env;
2856     if (model_id == NULL) {
2859     len = strlen(model_id);
2860     memset(env->cpuid_model, 0, 48);
2861     for (i = 0; i < 48; i++) {
2865         c = (uint8_t)model_id[i];
2867         env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
/* Getter for "tsc-frequency": stored internally in kHz, exposed in Hz. */
2871 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
2872                                    void *opaque, Error **errp)
2874     X86CPU *cpu = X86_CPU(obj);
2877     value = cpu->env.tsc_khz * 1000;
2878     visit_type_int(v, name, &value, errp);
/* Setter for "tsc-frequency": validates range then stores value/1000 in
 * both tsc_khz and user_tsc_khz (the user-requested copy). */
2881 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
2882                                    void *opaque, Error **errp)
2884     X86CPU *cpu = X86_CPU(obj);
2885     const int64_t min = 0;
2886     const int64_t max = INT64_MAX;
2887     Error *local_err = NULL;
2890     visit_type_int(v, name, &value, &local_err);
2892         error_propagate(errp, local_err);
2895     if (value < min || value > max) {
2896         error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2897                    name ? name : "null", value, min, max);
2901     cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
2904 /* Generic getter for "feature-words" and "filtered-features" properties */
/* @opaque points at the uint32_t feature-word array to expose; builds a
 * stack-allocated X86CPUFeatureWordInfoList and visits it. The list nodes
 * live on this stack frame, which is safe because the visitor consumes
 * them before return. */
2905 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
2906                                       const char *name, void *opaque,
2909     uint32_t *array = (uint32_t *)opaque;
2911     X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
2912     X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
2913     X86CPUFeatureWordInfoList *list = NULL;
2915     for (w = 0; w < FEATURE_WORDS; w++) {
2916         FeatureWordInfo *wi = &feature_word_info[w];
2917         X86CPUFeatureWordInfo *qwi = &word_infos[w];
2918         qwi->cpuid_input_eax = wi->cpuid_eax;
2919         qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
2920         qwi->cpuid_input_ecx = wi->cpuid_ecx;
2921         qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
2922         qwi->features = array[w];
2924         /* List will be in reverse order, but order shouldn't matter */
2925         list_entries[w].next = list;
2926         list_entries[w].value = &word_infos[w];
2927         list = &list_entries[w];
2930     visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
/* Getter for the Hyper-V "hv-spinlocks" retry-count property. */
2933 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2934                                  void *opaque, Error **errp)
2936     X86CPU *cpu = X86_CPU(obj);
2937     int64_t value = cpu->hyperv_spinlock_attempts;
2939     visit_type_int(v, name, &value, errp);
/* Setter: Hyper-V requires at least 0xFFF spinlock retries; upper bound
 * is UINT_MAX since the field is 32-bit. */
2942 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2943                                  void *opaque, Error **errp)
2945     const int64_t min = 0xFFF;
2946     const int64_t max = UINT_MAX;
2947     X86CPU *cpu = X86_CPU(obj);
2951     visit_type_int(v, name, &value, &err);
2953         error_propagate(errp, err);
2957     if (value < min || value > max) {
2958         error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2959                    " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2960                    object_get_typename(obj), name ? name : "null",
2964     cpu->hyperv_spinlock_attempts = value;
/* qdev PropertyInfo wiring the two accessors above. */
2967 static const PropertyInfo qdev_prop_spinlocks = {
2969     .get = x86_get_hv_spinlocks,
2970     .set = x86_set_hv_spinlocks,
2973 /* Convert all '_' in a feature string option name to '-', to make feature
2974  * name conform to QOM property naming rule, which uses '-' instead of '_'.
/* In-place rewrite; the '-' assignment is in an elided line. */
2976 static inline void feat2prop(char *s)
2978     while ((s = strchr(s, '_'))) {
2983 /* Return the feature property name for a feature flag bit */
2984 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
2986     /* XSAVE components are automatically enabled by other features,
2987      * so return the original feature name instead
2989     if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
2990         int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
2992         if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
2993             x86_ext_save_areas[comp].bits) {
/* Redirect to the feature word/bit that controls this XSAVE component. */
2994             w = x86_ext_save_areas[comp].feature;
2995             bitnr = ctz32(x86_ext_save_areas[comp].bits);
3000     assert(w < FEATURE_WORDS);
3001     return feature_word_info[w].feat_names[bitnr];
3004 /* Compatibily hack to maintain legacy +-feat semantic,
3005  * where +-feat overwrites any feature set by
3006  * feat=on|feat even if the later is parsed after +-feat
3007  * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
3009 static GList *plus_features, *minus_features;
/* GCompareFunc adapter for g_list_find_custom over C strings. */
3011 static gint compare_string(gconstpointer a, gconstpointer b)
3013     return g_strcmp0(a, b);
3016 /* Parse "+feature,-feature,feature=foo" CPU feature string
/* Splits @features on ',' and registers each "name=value" pair as a
 * global property on @typename. "+feat"/"-feat" use the legacy semantics
 * described above and are collected into plus_features/minus_features.
 * Only runs once per process (cpu_globals_initialized guard).
 * NOTE(review): uses strtok(), so @features is mutated and parsing is not
 * reentrant — acceptable here because of the run-once guard. */
3018 static void x86_cpu_parse_featurestr(const char *typename, char *features,
3021     char *featurestr; /* Single 'key=value" string being parsed */
3022     static bool cpu_globals_initialized;
3023     bool ambiguous = false;
3025     if (cpu_globals_initialized) {
3028     cpu_globals_initialized = true;
3034     for (featurestr = strtok(features, ",");
3036          featurestr = strtok(NULL, ",")) {
3038         const char *val = NULL;
3041         GlobalProperty *prop;
3043         /* Compatibility syntax: */
3044         if (featurestr[0] == '+') {
3045             plus_features = g_list_append(plus_features,
3046                                           g_strdup(featurestr + 1));
3048         } else if (featurestr[0] == '-') {
3049             minus_features = g_list_append(minus_features,
3050                                            g_strdup(featurestr + 1));
/* Split "name=value"; value handling for the '=' case is partly elided. */
3054         eq = strchr(featurestr, '=');
3062         feat2prop(featurestr);
/* Mixing "+feat" with "feat=..." (or "-feat") is ambiguous; warn. */
3065         if (g_list_find_custom(plus_features, name, compare_string)) {
3066             warn_report("Ambiguous CPU model string. "
3067                         "Don't mix both \"+%s\" and \"%s=%s\"",
3071         if (g_list_find_custom(minus_features, name, compare_string)) {
3072             warn_report("Ambiguous CPU model string. "
3073                         "Don't mix both \"-%s\" and \"%s=%s\"",
/* Special case: "tsc-freq" accepts a metric-suffixed size value and is
 * forwarded as the "tsc-frequency" property in plain decimal. */
3079         if (!strcmp(name, "tsc-freq")) {
3083             ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
3084             if (ret < 0 || tsc_freq > INT64_MAX) {
3085                 error_setg(errp, "bad numerical value %s", val);
3088             snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
3090             name = "tsc-frequency";
3093         prop = g_new0(typeof(*prop), 1);
3094         prop->driver = typename;
3095         prop->property = g_strdup(name);
3096         prop->value = g_strdup(val);
3097         prop->errp = &error_fatal;
3098         qdev_prop_register_global(prop);
3102         warn_report("Compatibility of ambiguous CPU model "
3103                     "strings won't be kept on future QEMU versions");
3107 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3108 static int x86_cpu_filter_features(X86CPU *cpu);
3110 /* Check for missing features that may prevent the CPU class from
3111  * running using the current machine and accelerator.
/* Appends one strList entry per unavailable feature to @missing_feats.
 * Instantiates a throwaway CPU object of the class, expands and filters
 * its features, then reports every filtered bit by property name. */
3113 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3114                                                  strList **missing_feats)
3119     strList **next = missing_feats;
/* A host-CPUID-only model without KVM/HVF is reported as missing "kvm". */
3121     if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3122         strList *new = g_new0(strList, 1);
3123         new->value = g_strdup("kvm");
3124         *missing_feats = new;
3128     xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3130     x86_cpu_expand_features(xc, &err);
3132     /* Errors at x86_cpu_expand_features should never happen,
3133      * but in case it does, just report the model as not
3134      * runnable at all using the "type" property.
3136         strList *new = g_new0(strList, 1);
3137         new->value = g_strdup("type");
3142     x86_cpu_filter_features(xc);
3144     for (w = 0; w < FEATURE_WORDS; w++) {
3145         uint32_t filtered = xc->filtered_features[w];
3147         for (i = 0; i < 32; i++) {
3148             if (filtered & (1UL << i)) {
3149                 strList *new = g_new0(strList, 1);
3150                 new->value = g_strdup(x86_cpu_feature_name(w, i));
3157     object_unref(OBJECT(xc));
3160 /* Print all cpuid feature names in featureset
/* Space-separated, skipping NULL entries in the 32-slot name table. */
3162 static void listflags(FILE *f, fprintf_function print, const char **featureset)
3167     for (bit = 0; bit < 32; bit++) {
3168         if (featureset[bit]) {
3169             print(f, "%s%s", first ? "" : " ", featureset[bit]);
3175 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3176 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3178     ObjectClass *class_a = (ObjectClass *)a;
3179     ObjectClass *class_b = (ObjectClass *)b;
3180     X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3181     X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3182     const char *name_a, *name_b;
3184     if (cc_a->ordering != cc_b->ordering) {
3185         return cc_a->ordering - cc_b->ordering;
3187     name_a = object_class_get_name(class_a);
3188     name_b = object_class_get_name(class_b);
3189     return strcmp(name_a, name_b);
/* Returns a newly allocated, sorted list of all TYPE_X86_CPU classes;
 * caller frees with g_slist_free(). */
3193 static GSList *get_sorted_cpu_model_list(void)
3195     GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3196     list = g_slist_sort(list, x86_cpu_list_compare);
/* g_slist_foreach callback: print one CPU model line ("x86 name desc").
 * Falls back to the model's CPUID model_id when no class description. */
3200 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3202     ObjectClass *oc = data;
3203     X86CPUClass *cc = X86_CPU_CLASS(oc);
3204     CPUListState *s = user_data;
3205     char *name = x86_cpu_class_get_model_name(cc);
3206     const char *desc = cc->model_description;
3207     if (!desc && cc->cpu_def) {
3208         desc = cc->cpu_def->model_id;
3211     (*s->cpu_fprintf)(s->file, "x86 %16s  %-48s\n",
3216 /* list available CPU models and flags */
/* Implements "-cpu help": prints the sorted model list, then all
 * recognized CPUID flag names grouped per feature word. */
3217 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3222         .cpu_fprintf = cpu_fprintf,
3226     (*cpu_fprintf)(f, "Available CPUs:\n");
3227     list = get_sorted_cpu_model_list();
3228     g_slist_foreach(list, x86_cpu_list_entry, &s);
3231     (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
3232     for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3233         FeatureWordInfo *fw = &feature_word_info[i];
3235         (*cpu_fprintf)(f, "  ");
3236         listflags(f, cpu_fprintf, fw->feat_names);
3237         (*cpu_fprintf)(f, "\n");
/* g_slist_foreach callback: build one CpuDefinitionInfo (for QMP
 * query-cpu-definitions) and prepend it to the list in @user_data. */
3241 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3243     ObjectClass *oc = data;
3244     X86CPUClass *cc = X86_CPU_CLASS(oc);
3245     CpuDefinitionInfoList **cpu_list = user_data;
3246     CpuDefinitionInfoList *entry;
3247     CpuDefinitionInfo *info;
3249     info = g_malloc0(sizeof(*info));
3250     info->name = x86_cpu_class_get_model_name(cc);
3251     x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3252     info->has_unavailable_features = true;
3253     info->q_typename = g_strdup(object_class_get_name(oc));
3254     info->migration_safe = cc->migration_safe;
3255     info->has_migration_safe = true;
3256     info->q_static = cc->static_model;
3258     entry = g_malloc0(sizeof(*entry));
3259     entry->value = info;
/* Prepend; resulting list order is reverse of the sorted class list. */
3260     entry->next = *cpu_list;
/* QMP query-cpu-definitions entry point for the i386 target.
 * Caller owns the returned list (qapi_free_CpuDefinitionInfoList). */
3264 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
3266     CpuDefinitionInfoList *cpu_list = NULL;
3267     GSList *list = get_sorted_cpu_model_list();
3268     g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
/* Return the feature bits of word @w that the active accelerator
 * (KVM, HVF or TCG) can supply; optionally restricted to bits that are
 * safe for migration. */
3273 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3274                                                    bool migratable_only)
3276     FeatureWordInfo *wi = &feature_word_info[w];
3279     if (kvm_enabled()) {
3280         r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
3283     } else if (hvf_enabled()) {
3284         r = hvf_get_supported_cpuid(wi->cpuid_eax,
3287     } else if (tcg_enabled()) {
3288         r = wi->tcg_features;
3292     if (migratable_only) {
3293         r &= x86_cpu_get_migratable_flags(w);
/* Emit warnings for every feature that was filtered out of this CPU. */
3298 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3302     for (w = 0; w < FEATURE_WORDS; w++) {
3303         report_unavailable_features(w, cpu->filtered_features[w]);
/* Apply a NULL-terminated PropValue table (e.g. kvm_default_props) to
 * @cpu by parsing each value into the corresponding QOM property. */
3307 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3310     for (pv = props; pv->prop; pv++) {
3314         object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3319 /* Load data from X86CPUDefinition into a X86CPU object
3321 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
3323 CPUX86State *env = &cpu->env;
3325 char host_vendor[CPUID_VENDOR_SZ + 1];
3328 /*NOTE: any property set by this function should be returned by
3329 * x86_cpu_static_props(), so static expansion of
3330 * query-cpu-model-expansion is always complete.
3333 /* CPU models only set _minimum_ values for level/xlevel: */
3334 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
3335 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
3337 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
3338 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
3339 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
3340 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
/* Copy the model's feature words straight into the CPU state */
3341 for (w = 0; w < FEATURE_WORDS; w++) {
3342 env->features[w] = def->features[w];
3345 /* Store Cache information from the X86CPUDefinition if available */
3346 env->cache_info = def->cache_info;
/* Models without explicit cache info fall back to legacy tables */
3347 cpu->legacy_cache = def->cache_info ? 0 : 1;
3349 /* Special cases not set in the X86CPUDefinition structs: */
3350 /* TODO: in-kernel irqchip for hvf */
3351 if (kvm_enabled()) {
/* Userspace APIC cannot provide x2apic; disable it by default */
3352 if (!kvm_irqchip_in_kernel()) {
3353 x86_cpu_change_kvm_default("x2apic", "off");
3356 x86_cpu_apply_props(cpu, kvm_default_props);
3357 } else if (tcg_enabled()) {
3358 x86_cpu_apply_props(cpu, tcg_default_props);
/* Always advertise that the guest runs under a hypervisor */
3361 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
3363 /* sysenter isn't supported in compatibility mode on AMD,
3364 * syscall isn't supported in compatibility mode on Intel.
3365 * Normally we advertise the actual CPU vendor, but you can
3366 * override this using the 'vendor' property if you want to use
3367 * KVM's sysenter/syscall emulation in compatibility mode and
3368 * when doing cross vendor migration
3370 vendor = def->vendor;
/* With KVM/HVF, default the vendor string to the host's */
3371 if (accel_uses_host_cpuid()) {
3372 uint32_t ebx = 0, ecx = 0, edx = 0;
3373 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
3374 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
3375 vendor = host_vendor;
3378 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
3382 /* Return a QDict containing keys for all properties that can be included
3383 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3384 * must be included in the dictionary.
3386 static QDict *x86_cpu_static_props(void)
/* Fixed list of property names (contents elided in this excerpt) */
3390 static const char *props[] = {
3408 for (i = 0; props[i]; i++) {
3409 qdict_put_null(d, props[i]);
/* Also add every named feature flag from every feature word */
3412 for (w = 0; w < FEATURE_WORDS; w++) {
3413 FeatureWordInfo *fi = &feature_word_info[w];
3415 for (bit = 0; bit < 32; bit++) {
3416 if (!fi->feat_names[bit]) {
3419 qdict_put_null(d, fi->feat_names[bit]);
3426 /* Add an entry to @props dict, with the value for property. */
3427 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
/* Read the current QOM property value and store it under the same key */
3429 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3432 qdict_put_obj(props, prop, value);
3435 /* Convert CPU model data from X86CPU object to a property dictionary
3436 * that can recreate exactly the same CPU model.
3438 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3440 QDict *sprops = x86_cpu_static_props();
3441 const QDictEntry *e;
/* Emit a value for every key in the static-property set */
3443 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3444 const char *prop = qdict_entry_key(e);
3445 x86_cpu_expand_prop(cpu, props, prop);
3449 /* Convert CPU model data from X86CPU object to a property dictionary
3450 * that can recreate exactly the same CPU model, including every
3451 * writeable QOM property.
3453 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3455 ObjectPropertyIterator iter;
3456 ObjectProperty *prop;
3458 object_property_iter_init(&iter, OBJECT(cpu));
3459 while ((prop = object_property_iter_next(&iter))) {
3460 /* skip read-only or write-only properties */
3461 if (!prop->get || !prop->set) {
3465 /* "hotplugged" is the only property that is configurable
3466 * on the command-line but will be set differently on CPUs
3467 * created using "-cpu ... -smp ..." and by CPUs created
3468 * on the fly by x86_cpu_from_model() for querying. Skip it.
3470 if (!strcmp(prop->name, "hotplugged")) {
3473 x86_cpu_expand_prop(cpu, props, prop->name);
/*
 * Set each (key, value) pair of @props as a QOM property on @obj,
 * propagating the first error to @errp.
 */
3477 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3479 const QDictEntry *prop;
3482 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3483 object_property_set_qobject(obj, qdict_entry_value(prop),
3484 qdict_entry_key(prop), &err);
3490 error_propagate(errp, err);
3493 /* Create X86CPU object according to model+props specification */
3494 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3500 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3502 error_setg(&err, "CPU model '%s' not found", model);
/* Instantiate the class and overlay the caller-supplied properties */
3506 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3508 object_apply_props(OBJECT(xc), props, &err);
/* Expand features so the object reflects accelerator capabilities */
3514 x86_cpu_expand_features(xc, &err);
/* NOTE(review): error path below — the object is released on failure */
3521 error_propagate(errp, err);
3522 object_unref(OBJECT(xc));
/*
 * QMP query-cpu-model-expansion: expand @model into either a "static"
 * (base + delta props) or "full" (every writeable property) form.
 * NOTE(review): several lines (switch statement, error-path labels,
 * braces) are elided in this excerpt.
 */
3528 CpuModelExpansionInfo *
3529 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3530 CpuModelInfo *model,
3535 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3536 QDict *props = NULL;
3537 const char *base_name;
3539 xc = x86_cpu_from_model(model->name,
3541 qobject_to(QDict, model->props) :
3547 props = qdict_new();
3550 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3551 /* Static expansion will be based on "base" only */
3553 x86_cpu_to_dict(xc, props);
3555 case CPU_MODEL_EXPANSION_TYPE_FULL:
3556 /* As we don't return every single property, full expansion needs
3557 * to keep the original model name+props, and add extra
3558 * properties on top of that.
3560 base_name = model->name;
3561 x86_cpu_to_dict_full(xc, props);
/* NOTE(review): "Unsupportted" is a typo — should read "Unsupported" */
3564 error_setg(&err, "Unsupportted expansion type");
3569 props = qdict_new();
3571 x86_cpu_to_dict(xc, props);
/* Build the returned CpuModelInfo from the collected properties */
3573 ret->model = g_new0(CpuModelInfo, 1);
3574 ret->model->name = g_strdup(base_name);
3575 ret->model->props = QOBJECT(props);
3576 ret->model->has_props = true;
3579 object_unref(OBJECT(xc));
3581 error_propagate(errp, err);
3582 qapi_free_CpuModelExpansionInfo(ret);
/*
 * Return the GDB architecture name for this target: "i386:x86-64" on
 * 64-bit builds, plain "i386" otherwise.  Caller frees the string.
 */
3588 static gchar *x86_gdb_arch_name(CPUState *cs)
3590 #ifdef TARGET_X86_64
3591 return g_strdup("i386:x86-64");
3593 return g_strdup("i386");
/*
 * class_init for model-specific CPU subclasses: attach the
 * X86CPUDefinition passed as @data and mark the model migration-safe.
 */
3597 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3599 X86CPUDefinition *cpudef = data;
3600 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3602 xcc->cpu_def = cpudef;
3603 xcc->migration_safe = true;
/*
 * Register a QOM type for one X86CPUDefinition.  The sanity asserts
 * catch model-table mistakes at startup rather than at runtime.
 * NOTE(review): TypeInfo fields and type_register() call are partly
 * elided in this excerpt.
 */
3606 static void x86_register_cpudef_type(X86CPUDefinition *def)
3608 char *typename = x86_cpu_type_name(def->name);
3611 .parent = TYPE_X86_CPU,
3612 .class_init = x86_cpu_cpudef_class_init,
3616 /* AMD aliases are handled at runtime based on CPUID vendor, so
3617 * they shouldn't be set on the CPU model table.
3619 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3620 /* catch mistakes instead of silently truncating model_id when too long */
3621 assert(def->model_id && strlen(def->model_id) <= 48);
3628 #if !defined(CONFIG_USER_ONLY)
/* Remove the APIC feature bit from CPUID[1].EDX (system emulation only). */
3630 void cpu_clear_apic_feature(CPUX86State *env)
3632 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3635 #endif /* !CONFIG_USER_ONLY */
/*
 * Emulate the CPUID instruction for leaf @index / sub-leaf @count,
 * writing the four result registers through @eax..@edx.  This is the
 * central dispatch for all basic (0x0000_xxxx), hypervisor
 * (0x4000_xxxx), extended (0x8000_xxxx) and Centaur (0xC000_xxxx)
 * leaves.  NOTE(review): most case labels, braces and blank lines are
 * elided in this excerpt; per-leaf sections are identified by the
 * surviving comments below.
 */
3637 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3638 uint32_t *eax, uint32_t *ebx,
3639 uint32_t *ecx, uint32_t *edx)
3641 X86CPU *cpu = x86_env_get_cpu(env);
3642 CPUState *cs = CPU(cpu);
3643 uint32_t pkg_offset;
3645 uint32_t signature[3];
3647 /* Calculate & apply limits for different index ranges */
3648 if (index >= 0xC0000000) {
3649 limit = env->cpuid_xlevel2;
3650 } else if (index >= 0x80000000) {
3651 limit = env->cpuid_xlevel;
3652 } else if (index >= 0x40000000) {
3655 limit = env->cpuid_level;
3658 if (index > limit) {
3659 /* Intel documentation states that invalid EAX input will
3660 * return the same information as EAX=cpuid_level
3661 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3663 index = env->cpuid_level;
/* Leaf 0: max basic leaf + vendor string (EBX:EDX:ECX order) */
3668 *eax = env->cpuid_level;
3669 *ebx = env->cpuid_vendor1;
3670 *edx = env->cpuid_vendor2;
3671 *ecx = env->cpuid_vendor3;
/* Leaf 1: version/feature information */
3674 *eax = env->cpuid_version;
3675 *ebx = (cpu->apic_id << 24) |
3676 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3677 *ecx = env->features[FEAT_1_ECX];
/* OSXSAVE mirrors CR4.OSXSAVE when XSAVE is available */
3678 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3679 *ecx |= CPUID_EXT_OSXSAVE;
3681 *edx = env->features[FEAT_1_EDX];
3682 if (cs->nr_cores * cs->nr_threads > 1) {
3683 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3688 /* cache info: needed for Pentium Pro compatibility */
3689 if (cpu->cache_info_passthrough) {
3690 host_cpuid(index, 0, eax, ebx, ecx, edx);
3693 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3695 if (!cpu->enable_l3_cache) {
3698 if (env->cache_info && !cpu->legacy_cache) {
3699 *ecx = cpuid2_cache_descriptor(&env->cache_info->l3_cache);
3701 *ecx = cpuid2_cache_descriptor(&legacy_l3_cache);
3704 if (env->cache_info && !cpu->legacy_cache) {
3705 *edx = (cpuid2_cache_descriptor(&env->cache_info->l1d_cache) << 16) |
3706 (cpuid2_cache_descriptor(&env->cache_info->l1i_cache) << 8) |
3707 (cpuid2_cache_descriptor(&env->cache_info->l2_cache));
3709 *edx = (cpuid2_cache_descriptor(&legacy_l1d_cache) << 16) |
3710 (cpuid2_cache_descriptor(&legacy_l1i_cache) << 8) |
3711 (cpuid2_cache_descriptor(&legacy_l2_cache_cpuid2));
3715 /* cache info: needed for Core compatibility */
3716 if (cpu->cache_info_passthrough) {
3717 host_cpuid(index, count, eax, ebx, ecx, edx);
3718 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3719 *eax &= ~0xFC000000;
3720 if ((*eax & 31) && cs->nr_cores > 1) {
3721 *eax |= (cs->nr_cores - 1) << 26;
3725 CPUCacheInfo *l1d, *l1i, *l2, *l3;
3726 if (env->cache_info && !cpu->legacy_cache) {
3727 l1d = &env->cache_info->l1d_cache;
3728 l1i = &env->cache_info->l1i_cache;
3729 l2 = &env->cache_info->l2_cache;
3730 l3 = &env->cache_info->l3_cache;
3732 l1d = &legacy_l1d_cache;
3733 l1i = &legacy_l1i_cache;
3734 l2 = &legacy_l2_cache;
3735 l3 = &legacy_l3_cache;
/* Sub-leaf dispatch on @count for CPUID[4] */
3738 case 0: /* L1 dcache info */
3739 encode_cache_cpuid4(l1d, 1, cs->nr_cores,
3740 eax, ebx, ecx, edx);
3742 case 1: /* L1 icache info */
3743 encode_cache_cpuid4(l1i, 1, cs->nr_cores,
3744 eax, ebx, ecx, edx);
3746 case 2: /* L2 cache info */
3747 encode_cache_cpuid4(l2, cs->nr_threads, cs->nr_cores,
3748 eax, ebx, ecx, edx);
3750 case 3: /* L3 cache info */
3751 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3752 if (cpu->enable_l3_cache) {
3753 encode_cache_cpuid4(l3, (1 << pkg_offset), cs->nr_cores,
3754 eax, ebx, ecx, edx);
3758 default: /* end of info */
3759 *eax = *ebx = *ecx = *edx = 0;
3765 /* mwait info: needed for Core compatibility */
3766 *eax = 0; /* Smallest monitor-line size in bytes */
3767 *ebx = 0; /* Largest monitor-line size in bytes */
3768 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
3772 /* Thermal and Power Leaf */
3773 *eax = env->features[FEAT_6_EAX];
3779 /* Structured Extended Feature Flags Enumeration Leaf */
3781 *eax = 0; /* Maximum ECX value for sub-leaves */
3782 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3783 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
/* OSPKE mirrors CR4.PKE, like OSXSAVE above */
3784 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3785 *ecx |= CPUID_7_0_ECX_OSPKE;
3787 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3796 /* Direct Cache Access Information Leaf */
3797 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3803 /* Architectural Performance Monitoring Leaf */
3804 if (kvm_enabled() && cpu->enable_pmu) {
3805 KVMState *s = cs->kvm_state;
3807 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
3808 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
3809 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
3810 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
3811 } else if (hvf_enabled() && cpu->enable_pmu) {
3812 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
3813 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
3814 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
3815 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
3824 /* Extended Topology Enumeration Leaf */
3825 if (!cpu->enable_cpuid_0xb) {
3826 *eax = *ebx = *ecx = *edx = 0;
3830 *ecx = count & 0xff;
3831 *edx = cpu->apic_id;
/* SMT level: shift width and thread count per core */
3835 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
3836 *ebx = cs->nr_threads;
3837 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
/* Core level: shift width and logical CPUs per package */
3840 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3841 *ebx = cs->nr_cores * cs->nr_threads;
3842 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
3847 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
3850 assert(!(*eax & ~0x1f));
3851 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
3854 /* Processor Extended State */
3859 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
/* Sub-leaf 0: enabled XSAVE components and total area size */
3864 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
3865 *eax = env->features[FEAT_XSAVE_COMP_LO];
3866 *edx = env->features[FEAT_XSAVE_COMP_HI];
3868 } else if (count == 1) {
3869 *eax = env->features[FEAT_XSAVE];
3870 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
3871 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
3872 const ExtSaveArea *esa = &x86_ext_save_areas[count];
3880 /* Intel Processor Trace Enumeration */
3885 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
3891 *eax = INTEL_PT_MAX_SUBLEAF;
3892 *ebx = INTEL_PT_MINIMAL_EBX;
3893 *ecx = INTEL_PT_MINIMAL_ECX;
3894 } else if (count == 1) {
3895 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
3896 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
/* Hypervisor leaf 0x40000000: expose "TCGTCGTCGTCG" signature on TCG */
3902 * CPUID code in kvm_arch_init_vcpu() ignores stuff
3903 * set here, but we restrict to TCG none the less.
3905 if (tcg_enabled() && cpu->expose_tcg) {
3906 memcpy(signature, "TCGTCGTCGTCG", 12);
3908 *ebx = signature[0];
3909 *ecx = signature[1];
3910 *edx = signature[2];
/* Leaf 0x80000000: max extended leaf + vendor string */
3925 *eax = env->cpuid_xlevel;
3926 *ebx = env->cpuid_vendor1;
3927 *edx = env->cpuid_vendor2;
3928 *ecx = env->cpuid_vendor3;
/* Leaf 0x80000001: extended version/feature information */
3931 *eax = env->cpuid_version;
3933 *ecx = env->features[FEAT_8000_0001_ECX];
3934 *edx = env->features[FEAT_8000_0001_EDX];
3936 /* The Linux kernel checks for the CMPLegacy bit and
3937 * discards multiple thread information if it is set.
3938 * So don't set it here for Intel to make Linux guests happy.
3940 if (cs->nr_cores * cs->nr_threads > 1) {
3941 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
3942 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
3943 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
3944 *ecx |= 1 << 1; /* CmpLegacy bit */
/* Leaves 0x80000002..4: processor brand string, 16 bytes per leaf */
3951 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
3952 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
3953 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
3954 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
3957 /* cache info (L1 cache) */
3958 if (cpu->cache_info_passthrough) {
3959 host_cpuid(index, 0, eax, ebx, ecx, edx);
3962 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
3963 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
3964 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
3965 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
3966 if (env->cache_info && !cpu->legacy_cache) {
3967 *ecx = encode_cache_cpuid80000005(&env->cache_info->l1d_cache);
3968 *edx = encode_cache_cpuid80000005(&env->cache_info->l1i_cache);
3970 *ecx = encode_cache_cpuid80000005(&legacy_l1d_cache_amd);
3971 *edx = encode_cache_cpuid80000005(&legacy_l1i_cache_amd);
3975 /* cache info (L2 cache) */
3976 if (cpu->cache_info_passthrough) {
3977 host_cpuid(index, 0, eax, ebx, ecx, edx);
3980 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
3981 (L2_DTLB_2M_ENTRIES << 16) | \
3982 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
3983 (L2_ITLB_2M_ENTRIES);
3984 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
3985 (L2_DTLB_4K_ENTRIES << 16) | \
3986 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
3987 (L2_ITLB_4K_ENTRIES);
3988 if (env->cache_info && !cpu->legacy_cache) {
3989 encode_cache_cpuid80000006(&env->cache_info->l2_cache,
3990 cpu->enable_l3_cache ?
3991 &env->cache_info->l3_cache : NULL,
3994 encode_cache_cpuid80000006(&legacy_l2_cache_amd,
3995 cpu->enable_l3_cache ?
3996 &legacy_l3_cache : NULL,
/* Leaf 0x80000007: advanced power management */
4004 *edx = env->features[FEAT_8000_0007_EDX];
4007 /* virtual & phys address size in low 2 bytes. */
4008 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4009 /* 64 bit processor */
4010 *eax = cpu->phys_bits; /* configurable physical bits */
4011 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
4012 *eax |= 0x00003900; /* 57 bits virtual */
4014 *eax |= 0x00003000; /* 48 bits virtual */
4017 *eax = cpu->phys_bits;
4019 *ebx = env->features[FEAT_8000_0008_EBX];
4022 if (cs->nr_cores * cs->nr_threads > 1) {
4023 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
/* Leaf 0x8000000A: SVM revision and features (AMD) */
4027 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4028 *eax = 0x00000001; /* SVM Revision */
4029 *ebx = 0x00000010; /* nr of ASIDs */
4031 *edx = env->features[FEAT_SVM]; /* optional features */
/* Leaf 0xC0000000: max Centaur/VIA leaf */
4040 *eax = env->cpuid_xlevel2;
4046 /* Support for VIA CPU's CPUID instruction */
4047 *eax = env->cpuid_version;
4050 *edx = env->features[FEAT_C000_0001_EDX];
4055 /* Reserved for the future, and now filled with zero */
/* Leaf 0x8000001F: AMD SEV capability / C-bit position */
4062 *eax = sev_enabled() ? 0x2 : 0;
4063 *ebx = sev_get_cbit_position();
4064 *ebx |= sev_get_reduced_phys_bits() << 6;
4069 /* reserved values: zero */
4078 /* CPUClass::reset() */
/*
 * Bring the vCPU to its architectural reset state: segment caches,
 * control registers, FPU/SSE, MTRRs, debug registers, then delegate
 * accelerator-specific reset to KVM/HVF.  NOTE(review): many lines
 * (braces, some assignments) are elided in this excerpt.
 */
4079 static void x86_cpu_reset(CPUState *s)
4081 X86CPU *cpu = X86_CPU(s);
4082 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
4083 CPUX86State *env = &cpu->env;
4088 xcc->parent_reset(s);
/* Zero everything up to (not including) the preserved tail fields */
4090 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
4092 env->old_exception = -1;
4094 /* init to reset state */
4096 env->hflags2 |= HF2_GIF_MASK;
4098 cpu_x86_update_cr0(env, 0x60000010);
4099 env->a20_mask = ~0x0;
4100 env->smbase = 0x30000;
4101 env->msr_smi_count = 0;
4103 env->idt.limit = 0xffff;
4104 env->gdt.limit = 0xffff;
4105 env->ldt.limit = 0xffff;
4106 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
4107 env->tr.limit = 0xffff;
4108 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
/* Reset segment registers; CS points at the reset vector F000:FFF0 */
4110 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
4111 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
4112 DESC_R_MASK | DESC_A_MASK);
4113 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
4114 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4116 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
4117 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4119 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
4120 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4122 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
4123 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4125 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
4126 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4130 env->regs[R_EDX] = env->cpuid_version;
/* Reset x87 FPU and SSE state */
4135 for (i = 0; i < 8; i++) {
4138 cpu_set_fpuc(env, 0x37f);
4140 env->mxcsr = 0x1f80;
4141 /* All units are in INIT state. */
4144 env->pat = 0x0007040600070406ULL;
4145 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
4147 memset(env->dr, 0, sizeof(env->dr));
4148 env->dr[6] = DR6_FIXED_1;
4149 env->dr[7] = DR7_FIXED_1;
4150 cpu_breakpoint_remove_all(s, BP_CPU);
4151 cpu_watchpoint_remove_all(s, BP_CPU);
4154 xcr0 = XSTATE_FP_MASK;
4156 #ifdef CONFIG_USER_ONLY
4157 /* Enable all the features for user-mode. */
4158 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4159 xcr0 |= XSTATE_SSE_MASK;
4161 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4162 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4163 if (env->features[esa->feature] & esa->bits) {
4168 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
4169 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
4171 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
4172 cr4 |= CR4_FSGSBASE_MASK;
4177 cpu_x86_update_cr4(env, cr4);
4180 * SDM 11.11.5 requires:
4181 * - IA32_MTRR_DEF_TYPE MSR.E = 0
4182 * - IA32_MTRR_PHYSMASKn.V = 0
4183 * All other bits are undefined. For simplification, zero it all.
4185 env->mtrr_deftype = 0;
4186 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
4187 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
4189 env->interrupt_injected = -1;
4190 env->exception_injected = -1;
4191 env->nmi_injected = false;
4192 #if !defined(CONFIG_USER_ONLY)
4193 /* We hard-wire the BSP to the first CPU. */
4194 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
/* Application processors start halted, waiting for INIT/SIPI */
4196 s->halted = !cpu_is_bsp(cpu);
4198 if (kvm_enabled()) {
4199 kvm_arch_reset_vcpu(cpu);
4201 else if (hvf_enabled()) {
4207 #ifndef CONFIG_USER_ONLY
/* True if this CPU is the bootstrap processor, per its APIC base MSR. */
4208 bool cpu_is_bsp(X86CPU *cpu)
4210 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4213 /* TODO: remove me, when reset over QOM tree is implemented */
/* Machine-reset notifier: reset this CPU when the whole machine resets. */
4214 static void x86_cpu_machine_reset_cb(void *opaque)
4216 X86CPU *cpu = opaque;
4217 cpu_reset(CPU(cpu));
/*
 * Initialize machine-check (MCE/MCA) state: only for family >= 6 CPUs
 * that advertise both MCE and MCA; enables all banks and, optionally,
 * LMCE when cpu->enable_lmce is set.
 */
4221 static void mce_init(X86CPU *cpu)
4223 CPUX86State *cenv = &cpu->env;
4226 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4227 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4228 (CPUID_MCE | CPUID_MCA)) {
4229 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4230 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4231 cenv->mcg_ctl = ~(uint64_t)0;
/* Enable every error-reporting bank (MCi_CTL = all ones) */
4232 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4233 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4238 #ifndef CONFIG_USER_ONLY
/*
 * Select the APIC implementation class matching the accelerator:
 * "kvm-apic" with an in-kernel KVM irqchip, "xen-apic" under Xen,
 * plain "apic" (userspace emulation) otherwise.
 */
4239 APICCommonClass *apic_get_class(void)
4241 const char *apic_type = "apic";
4243 /* TODO: in-kernel irqchip for hvf */
4244 if (kvm_apic_in_kernel()) {
4245 apic_type = "kvm-apic";
4246 } else if (xen_enabled()) {
4247 apic_type = "xen-apic";
4250 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
/*
 * Create the local APIC device for @cpu as a QOM child ("lapic"),
 * assign it the CPU's APIC ID and the default enabled base address.
 */
4253 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
4255 APICCommonState *apic;
4256 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
4258 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
/* Parent keeps the reference; drop ours after reparenting */
4260 object_property_add_child(OBJECT(cpu), "lapic",
4261 OBJECT(cpu->apic_state), &error_abort);
4262 object_unref(OBJECT(cpu->apic_state));
4264 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
4265 /* TODO: convert to link<> */
4266 apic = APIC_COMMON(cpu->apic_state);
4268 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
/*
 * Realize the CPU's APIC (system emulation).  The APIC MMIO region is
 * mapped once globally: all CPUs share the same address space here,
 * so only the first realized APIC performs the mapping.
 */
4271 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4273 APICCommonState *apic;
4274 static bool apic_mmio_map_once;
4276 if (cpu->apic_state == NULL) {
4279 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
4282 /* Map APIC MMIO area */
4283 apic = APIC_COMMON(cpu->apic_state);
4284 if (!apic_mmio_map_once) {
4285 memory_region_add_subregion_overlap(get_system_memory(),
4287 MSR_IA32_APICBASE_BASE,
4290 apic_mmio_map_once = true;
/*
 * machine-done notifier: once the machine is built, alias the global
 * /machine/smram region into this CPU's address space (high priority)
 * so SMM code can access SMRAM.
 */
4294 static void x86_cpu_machine_done(Notifier *n, void *unused)
4296 X86CPU *cpu = container_of(n, X86CPU, machine_done);
4297 MemoryRegion *smram =
4298 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4301 cpu->smram = g_new(MemoryRegion, 1);
4302 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4303 smram, 0, 1ull << 32);
4304 memory_region_set_enabled(cpu->smram, true);
/* Priority 1 so SMRAM overrides normal RAM at the same addresses */
4305 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
/* User-mode (#else) variant: no APIC to realize — presumably an empty
 * body, elided in this excerpt; TODO confirm against full source. */
4309 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4314 /* Note: Only safe for use on x86(-64) hosts */
/*
 * Query the host's physical address width via CPUID.80000008H.EAX[7:0];
 * falls back to 36 bits when the leaf is not available.
 */
4315 static uint32_t x86_host_phys_bits(void)
4318 uint32_t host_phys_bits;
4320 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4321 if (eax >= 0x80000008) {
4322 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4323 /* Note: According to AMD doc 25481 rev 2.34 they have a field
4324 * at 23:16 that can specify a maximum physical address bits for
4325 * the guest that can override this value; but I've not seen
4326 * anything with that set.
4328 host_phys_bits = eax & 0xff;
4330 /* It's an odd 64 bit machine that doesn't have the leaf for
4331 * physical address bits; fall back to 36 that's most older
4334 host_phys_bits = 36;
4337 return host_phys_bits;
/* Raise *min to @value if it is currently lower — presumably a simple
 * max() update; body elided in this excerpt, TODO confirm. */
4340 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4347 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4348 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4350 CPUX86State *env = &cpu->env;
4351 FeatureWordInfo *fi = &feature_word_info[w];
4352 uint32_t eax = fi->cpuid_eax;
/* Top nibble of the leaf selects which min-level field applies */
4353 uint32_t region = eax & 0xF0000000;
/* No bits set in this word -> nothing forces the leaf to exist */
4355 if (!env->features[w]) {
4361 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4364 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4367 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4372 /* Calculate XSAVE components based on the configured CPU feature flags */
4373 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4375 CPUX86State *env = &cpu->env;
/* Without XSAVE the component bitmap stays empty */
4379 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
/* One bit per save area whose controlling feature is enabled */
4384 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4385 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4386 if (env->features[esa->feature] & esa->bits) {
4387 mask |= (1ULL << i);
/* Publish as the CPUID[0xD] component words (low/high 32 bits) */
4391 env->features[FEAT_XSAVE_COMP_LO] = mask;
4392 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4395 /***** Steps involved on loading and filtering CPUID data
4397 * When initializing and realizing a CPU object, the steps
4398 * involved in setting up CPUID data are:
4400 * 1) Loading CPU model definition (X86CPUDefinition). This is
4401 * implemented by x86_cpu_load_def() and should be completely
4402 * transparent, as it is done automatically by instance_init.
4403 * No code should need to look at X86CPUDefinition structs
4404 * outside instance_init.
4406 * 2) CPU expansion. This is done by realize before CPUID
4407 * filtering, and will make sure host/accelerator data is
4408 * loaded for CPU models that depend on host capabilities
4409 * (e.g. "host"). Done by x86_cpu_expand_features().
4411 * 3) CPUID filtering. This initializes extra data related to
4412 * CPUID, and checks if the host supports all capabilities
4413 * required by the CPU. Runnability of a CPU model is
4414 * determined at this step. Done by x86_cpu_filter_features().
4416 * Some operations don't require all steps to be performed.
4419 * - CPU instance creation (instance_init) will run only CPU
4420 * model loading. CPU expansion can't run at instance_init-time
4421 * because host/accelerator data may be not available yet.
4422 * - CPU realization will perform both CPU model expansion and CPUID
4423 * filtering, and return an error in case one of them fails.
4424 * - query-cpu-definitions needs to run all 3 steps. It needs
4425 * to run CPUID filtering, as the 'unavailable-features'
4426 * field is set based on the filtering results.
4427 * - The query-cpu-model-expansion QMP command only needs to run
4428 * CPU model loading and CPU expansion. It should not filter
4429 * any CPUID data based on host capabilities.
4432 /* Expand CPU configuration data, based on configured features
4433 * and host/accelerator capabilities when appropriate.
4435 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4437 CPUX86State *env = &cpu->env;
4440 Error *local_err = NULL;
4442 /*TODO: Now cpu->max_features doesn't overwrite features
4443 * set using QOM properties, and we can convert
4444 * plus_features & minus_features to global properties
4445 * inside x86_cpu_parse_featurestr() too.
4447 if (cpu->max_features) {
4448 for (w = 0; w < FEATURE_WORDS; w++) {
4449 /* Override only features that weren't set explicitly
4453 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4454 ~env->user_features[w] & \
4455 ~feature_word_info[w].no_autoenable_flags;
/* Apply legacy +feature / -feature command-line lists */
4459 for (l = plus_features; l; l = l->next) {
4460 const char *prop = l->data;
4461 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4467 for (l = minus_features; l; l = l->next) {
4468 const char *prop = l->data;
4469 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
/* KVM paravirt features only make sense on KVM with expose-kvm on */
4475 if (!kvm_enabled() || !cpu->expose_kvm) {
4476 env->features[FEAT_KVM] = 0;
4479 x86_cpu_enable_xsave_components(cpu);
4481 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
4482 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4483 if (cpu->full_cpuid_auto_level) {
4484 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4485 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4486 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4487 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4488 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4489 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4490 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4491 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4492 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4493 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4494 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4495 /* SVM requires CPUID[0x8000000A] */
4496 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4497 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4500 /* SEV requires CPUID[0x8000001F] */
4501 if (sev_enabled()) {
4502 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4506 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4507 if (env->cpuid_level == UINT32_MAX) {
4508 env->cpuid_level = env->cpuid_min_level;
4510 if (env->cpuid_xlevel == UINT32_MAX) {
4511 env->cpuid_xlevel = env->cpuid_min_xlevel;
4513 if (env->cpuid_xlevel2 == UINT32_MAX) {
4514 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4518 if (local_err != NULL) {
4519 error_propagate(errp, local_err);
4524 * Finishes initialization of CPUID data, filters CPU feature
4525 * words based on host availability of each feature.
4527 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4529 static int x86_cpu_filter_features(X86CPU *cpu)
4531 CPUX86State *env = &cpu->env;
/* Mask each feature word to what the accelerator supports, and
 * remember what was dropped in cpu->filtered_features[] */
4535 for (w = 0; w < FEATURE_WORDS; w++) {
4536 uint32_t host_feat =
4537 x86_cpu_get_supported_feature_word(w, false);
4538 uint32_t requested_features = env->features[w];
4539 env->features[w] &= host_feat;
4540 cpu->filtered_features[w] = requested_features & ~env->features[w];
4541 if (cpu->filtered_features[w]) {
/* Intel PT is all-or-nothing: the host must match every capability
 * we would advertise in CPUID[0x14], otherwise drop the feature */
4546 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
4548 KVMState *s = CPU(cpu)->kvm_state;
4549 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
4550 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
4551 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
4552 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
4553 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
4556 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
4557 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
4558 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
4559 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
4560 INTEL_PT_ADDR_RANGES_NUM) ||
4561 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
4562 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
4563 (ecx_0 & INTEL_PT_IP_LIP)) {
4565 * Processor Trace capabilities aren't configurable, so if the
4566 * host can't emulate the capabilities we report on
4567 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
4569 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
4570 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
4578 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4579 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4580 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4581 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4582 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4583 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
4584 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4586 CPUState *cs = CPU(dev);
4587 X86CPU *cpu = X86_CPU(dev);
4588 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4589 CPUX86State *env = &cpu->env;
4590 Error *local_err = NULL;
4591 static bool ht_warned;
4593 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4594 char *name = x86_cpu_class_get_model_name(xcc);
4595 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4600 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4601 error_setg(errp, "apic-id property was not initialized properly");
4605 x86_cpu_expand_features(cpu, &local_err);
4610 if (x86_cpu_filter_features(cpu) &&
4611 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4612 x86_cpu_report_filtered_features(cpu);
4613 if (cpu->enforce_cpuid) {
4614 error_setg(&local_err,
4615 accel_uses_host_cpuid() ?
4616 "Host doesn't support requested features" :
4617 "TCG doesn't support requested features");
4622 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4625 if (IS_AMD_CPU(env)) {
4626 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4627 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4628 & CPUID_EXT2_AMD_ALIASES);
4631 /* For 64bit systems think about the number of physical bits to present.
4632 * ideally this should be the same as the host; anything other than matching
4633 * the host can cause incorrect guest behaviour.
4634 * QEMU used to pick the magic value of 40 bits that corresponds to
4635 * consumer AMD devices but nothing else.
4637 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4638 if (accel_uses_host_cpuid()) {
4639 uint32_t host_phys_bits = x86_host_phys_bits();
4642 if (cpu->host_phys_bits) {
4643 /* The user asked for us to use the host physical bits */
4644 cpu->phys_bits = host_phys_bits;
4647 /* Print a warning if the user set it to a value that's not the
4650 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4652 warn_report("Host physical bits (%u)"
4653 " does not match phys-bits property (%u)",
4654 host_phys_bits, cpu->phys_bits);
4658 if (cpu->phys_bits &&
4659 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4660 cpu->phys_bits < 32)) {
4661 error_setg(errp, "phys-bits should be between 32 and %u "
4663 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
4667 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4668 error_setg(errp, "TCG only supports phys-bits=%u",
4669 TCG_PHYS_ADDR_BITS);
4673 /* 0 means it was not explicitly set by the user (or by machine
4674 * compat_props or by the host code above). In this case, the default
4675 * is the value used by TCG (40).
4677 if (cpu->phys_bits == 0) {
4678 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4681 /* For 32 bit systems don't use the user set value, but keep
4682 * phys_bits consistent with what we tell the guest.
4684 if (cpu->phys_bits != 0) {
4685 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
4689 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4690 cpu->phys_bits = 36;
4692 cpu->phys_bits = 32;
4695 cpu_exec_realizefn(cs, &local_err);
4696 if (local_err != NULL) {
4697 error_propagate(errp, local_err);
4701 #ifndef CONFIG_USER_ONLY
4702 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
4704 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4705 x86_cpu_apic_create(cpu, &local_err);
4706 if (local_err != NULL) {
4714 #ifndef CONFIG_USER_ONLY
4715 if (tcg_enabled()) {
4716 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4717 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4719 /* Outer container... */
4720 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4721 memory_region_set_enabled(cpu->cpu_as_root, true);
4723 /* ... with two regions inside: normal system memory with low
4726 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4727 get_system_memory(), 0, ~0ull);
4728 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4729 memory_region_set_enabled(cpu->cpu_as_mem, true);
4732 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4733 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4735 /* ... SMRAM with higher priority, linked from /machine/smram. */
4736 cpu->machine_done.notify = x86_cpu_machine_done;
4737 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4743 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
4744 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
4745 * based on inputs (sockets,cores,threads), it is still better to gives
4748 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
4749 * cs->nr_threads hasn't be populated yet and the checking is incorrect.
4751 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
4752 error_report("AMD CPU doesn't support hyperthreading. Please configure"
4753 " -smp options properly.");
4757 x86_cpu_apic_realize(cpu, &local_err);
4758 if (local_err != NULL) {
4763 xcc->parent_realize(dev, &local_err);
4766 if (local_err != NULL) {
4767 error_propagate(errp, local_err);
4772 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
4774 X86CPU *cpu = X86_CPU(dev);
4775 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4776 Error *local_err = NULL;
4778 #ifndef CONFIG_USER_ONLY
4779 cpu_remove_sync(CPU(dev));
4780 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
4783 if (cpu->apic_state) {
4784 object_unparent(OBJECT(cpu->apic_state));
4785 cpu->apic_state = NULL;
4788 xcc->parent_unrealize(dev, &local_err);
4789 if (local_err != NULL) {
4790 error_propagate(errp, local_err);
4795 typedef struct BitProperty {
4800 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
4801 void *opaque, Error **errp)
4803 X86CPU *cpu = X86_CPU(obj);
4804 BitProperty *fp = opaque;
4805 uint32_t f = cpu->env.features[fp->w];
4806 bool value = (f & fp->mask) == fp->mask;
4807 visit_type_bool(v, name, &value, errp);
4810 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
4811 void *opaque, Error **errp)
4813 DeviceState *dev = DEVICE(obj);
4814 X86CPU *cpu = X86_CPU(obj);
4815 BitProperty *fp = opaque;
4816 Error *local_err = NULL;
4819 if (dev->realized) {
4820 qdev_prop_set_after_realize(dev, name, errp);
4824 visit_type_bool(v, name, &value, &local_err);
4826 error_propagate(errp, local_err);
4831 cpu->env.features[fp->w] |= fp->mask;
4833 cpu->env.features[fp->w] &= ~fp->mask;
4835 cpu->env.user_features[fp->w] |= fp->mask;
4838 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
4841 BitProperty *prop = opaque;
4845 /* Register a boolean property to get/set a single bit in a uint32_t field.
4847 * The same property name can be registered multiple times to make it affect
4848 * multiple bits in the same FeatureWord. In that case, the getter will return
4849 * true only if all bits are set.
4851 static void x86_cpu_register_bit_prop(X86CPU *cpu,
4852 const char *prop_name,
4858 uint32_t mask = (1UL << bitnr);
4860 op = object_property_find(OBJECT(cpu), prop_name, NULL);
4866 fp = g_new0(BitProperty, 1);
4869 object_property_add(OBJECT(cpu), prop_name, "bool",
4870 x86_cpu_get_bit_prop,
4871 x86_cpu_set_bit_prop,
4872 x86_cpu_release_bit_prop, fp, &error_abort);
4876 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
4880 FeatureWordInfo *fi = &feature_word_info[w];
4881 const char *name = fi->feat_names[bitnr];
4887 /* Property names should use "-" instead of "_".
4888 * Old names containing underscores are registered as aliases
4889 * using object_property_add_alias()
4891 assert(!strchr(name, '_'));
4892 /* aliases don't use "|" delimiters anymore, they are registered
4893 * manually using object_property_add_alias() */
4894 assert(!strchr(name, '|'));
4895 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
4898 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
4900 X86CPU *cpu = X86_CPU(cs);
4901 CPUX86State *env = &cpu->env;
4902 GuestPanicInformation *panic_info = NULL;
4904 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
4905 panic_info = g_malloc0(sizeof(GuestPanicInformation));
4907 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
4909 assert(HV_CRASH_PARAMS >= 5);
4910 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
4911 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
4912 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
4913 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
4914 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
4919 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
4920 const char *name, void *opaque,
4923 CPUState *cs = CPU(obj);
4924 GuestPanicInformation *panic_info;
4926 if (!cs->crash_occurred) {
4927 error_setg(errp, "No crash occured");
4931 panic_info = x86_cpu_get_crash_info(cs);
4932 if (panic_info == NULL) {
4933 error_setg(errp, "No crash information");
4937 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
4939 qapi_free_GuestPanicInformation(panic_info);
4942 static void x86_cpu_initfn(Object *obj)
4944 CPUState *cs = CPU(obj);
4945 X86CPU *cpu = X86_CPU(obj);
4946 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
4947 CPUX86State *env = &cpu->env;
4952 object_property_add(obj, "family", "int",
4953 x86_cpuid_version_get_family,
4954 x86_cpuid_version_set_family, NULL, NULL, NULL);
4955 object_property_add(obj, "model", "int",
4956 x86_cpuid_version_get_model,
4957 x86_cpuid_version_set_model, NULL, NULL, NULL);
4958 object_property_add(obj, "stepping", "int",
4959 x86_cpuid_version_get_stepping,
4960 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
4961 object_property_add_str(obj, "vendor",
4962 x86_cpuid_get_vendor,
4963 x86_cpuid_set_vendor, NULL);
4964 object_property_add_str(obj, "model-id",
4965 x86_cpuid_get_model_id,
4966 x86_cpuid_set_model_id, NULL);
4967 object_property_add(obj, "tsc-frequency", "int",
4968 x86_cpuid_get_tsc_freq,
4969 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
4970 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
4971 x86_cpu_get_feature_words,
4972 NULL, NULL, (void *)env->features, NULL);
4973 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
4974 x86_cpu_get_feature_words,
4975 NULL, NULL, (void *)cpu->filtered_features, NULL);
4977 object_property_add(obj, "crash-information", "GuestPanicInformation",
4978 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
4980 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
4982 for (w = 0; w < FEATURE_WORDS; w++) {
4985 for (bitnr = 0; bitnr < 32; bitnr++) {
4986 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
4990 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
4991 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
4992 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
4993 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
4994 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
4995 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
4996 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
4998 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
4999 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
5000 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
5001 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
5002 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
5003 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
5004 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
5005 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
5006 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
5007 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
5008 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
5009 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
5010 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
5011 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
5012 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
5013 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
5014 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
5015 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
5016 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
5017 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
5018 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
5021 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
5025 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5027 X86CPU *cpu = X86_CPU(cs);
5029 return cpu->apic_id;
5032 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5034 X86CPU *cpu = X86_CPU(cs);
5036 return cpu->env.cr[0] & CR0_PG_MASK;
5039 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5041 X86CPU *cpu = X86_CPU(cs);
5043 cpu->env.eip = value;
5046 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5048 X86CPU *cpu = X86_CPU(cs);
5050 cpu->env.eip = tb->pc - tb->cs_base;
5053 static bool x86_cpu_has_work(CPUState *cs)
5055 X86CPU *cpu = X86_CPU(cs);
5056 CPUX86State *env = &cpu->env;
5058 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
5059 CPU_INTERRUPT_POLL)) &&
5060 (env->eflags & IF_MASK)) ||
5061 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
5062 CPU_INTERRUPT_INIT |
5063 CPU_INTERRUPT_SIPI |
5064 CPU_INTERRUPT_MCE)) ||
5065 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
5066 !(env->hflags & HF_SMM_MASK));
5069 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5071 X86CPU *cpu = X86_CPU(cs);
5072 CPUX86State *env = &cpu->env;
5074 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5075 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5076 : bfd_mach_i386_i8086);
5077 info->print_insn = print_insn_i386;
5079 info->cap_arch = CS_ARCH_X86;
5080 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5081 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5083 info->cap_insn_unit = 1;
5084 info->cap_insn_split = 8;
5087 void x86_update_hflags(CPUX86State *env)
5090 #define HFLAG_COPY_MASK \
5091 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
5092 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
5093 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
5094 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
5096 hflags = env->hflags & HFLAG_COPY_MASK;
5097 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
5098 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
5099 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
5100 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
5101 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
5103 if (env->cr[4] & CR4_OSFXSR_MASK) {
5104 hflags |= HF_OSFXSR_MASK;
5107 if (env->efer & MSR_EFER_LMA) {
5108 hflags |= HF_LMA_MASK;
5111 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
5112 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
5114 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
5115 (DESC_B_SHIFT - HF_CS32_SHIFT);
5116 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
5117 (DESC_B_SHIFT - HF_SS32_SHIFT);
5118 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
5119 !(hflags & HF_CS32_MASK)) {
5120 hflags |= HF_ADDSEG_MASK;
5122 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
5123 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
5126 env->hflags = hflags;
5129 static Property x86_cpu_properties[] = {
5130 #ifdef CONFIG_USER_ONLY
5131 /* apic_id = 0 by default for *-user, see commit 9886e834 */
5132 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
5133 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
5134 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
5135 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
5137 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
5138 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
5139 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
5140 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
5142 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
5143 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
5144 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
5145 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
5146 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
5147 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
5148 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
5149 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
5150 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
5151 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
5152 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
5153 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
5154 DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
5155 DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
5156 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
5157 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
5158 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
5159 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
5160 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
5161 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
5162 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
5163 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
5164 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
5165 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
5166 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
5167 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
5168 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
5169 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
5170 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
5171 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
5172 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
5173 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
5175 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
5176 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
5178 * lecacy_cache defaults to CPU model being chosen. This is set in
5179 * x86_cpu_load_def based on cache_info which is initialized in
5182 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, false),
5185 * From "Requirements for Implementing the Microsoft
5186 * Hypervisor Interface":
5187 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
5189 * "Starting with Windows Server 2012 and Windows 8, if
5190 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
5191 * the hypervisor imposes no specific limit to the number of VPs.
5192 * In this case, Windows Server 2012 guest VMs may use more than
5193 * 64 VPs, up to the maximum supported number of processors applicable
5194 * to the specific Windows version being used."
5196 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
5197 DEFINE_PROP_END_OF_LIST()
5200 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
5202 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5203 CPUClass *cc = CPU_CLASS(oc);
5204 DeviceClass *dc = DEVICE_CLASS(oc);
5206 device_class_set_parent_realize(dc, x86_cpu_realizefn,
5207 &xcc->parent_realize);
5208 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
5209 &xcc->parent_unrealize);
5210 dc->props = x86_cpu_properties;
5212 xcc->parent_reset = cc->reset;
5213 cc->reset = x86_cpu_reset;
5214 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
5216 cc->class_by_name = x86_cpu_class_by_name;
5217 cc->parse_features = x86_cpu_parse_featurestr;
5218 cc->has_work = x86_cpu_has_work;
5220 cc->do_interrupt = x86_cpu_do_interrupt;
5221 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
5223 cc->dump_state = x86_cpu_dump_state;
5224 cc->get_crash_info = x86_cpu_get_crash_info;
5225 cc->set_pc = x86_cpu_set_pc;
5226 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
5227 cc->gdb_read_register = x86_cpu_gdb_read_register;
5228 cc->gdb_write_register = x86_cpu_gdb_write_register;
5229 cc->get_arch_id = x86_cpu_get_arch_id;
5230 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
5231 #ifdef CONFIG_USER_ONLY
5232 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
5234 cc->asidx_from_attrs = x86_asidx_from_attrs;
5235 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
5236 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
5237 cc->write_elf64_note = x86_cpu_write_elf64_note;
5238 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
5239 cc->write_elf32_note = x86_cpu_write_elf32_note;
5240 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
5241 cc->vmsd = &vmstate_x86_cpu;
5243 cc->gdb_arch_name = x86_gdb_arch_name;
5244 #ifdef TARGET_X86_64
5245 cc->gdb_core_xml_file = "i386-64bit.xml";
5246 cc->gdb_num_core_regs = 57;
5248 cc->gdb_core_xml_file = "i386-32bit.xml";
5249 cc->gdb_num_core_regs = 41;
5251 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
5252 cc->debug_excp_handler = breakpoint_handler;
5254 cc->cpu_exec_enter = x86_cpu_exec_enter;
5255 cc->cpu_exec_exit = x86_cpu_exec_exit;
5257 cc->tcg_initialize = tcg_x86_init;
5259 cc->disas_set_info = x86_disas_set_info;
5261 dc->user_creatable = true;
5264 static const TypeInfo x86_cpu_type_info = {
5265 .name = TYPE_X86_CPU,
5267 .instance_size = sizeof(X86CPU),
5268 .instance_init = x86_cpu_initfn,
5270 .class_size = sizeof(X86CPUClass),
5271 .class_init = x86_cpu_common_class_init,
5275 /* "base" CPU model, used by query-cpu-model-expansion */
5276 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
5278 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5280 xcc->static_model = true;
5281 xcc->migration_safe = true;
5282 xcc->model_description = "base CPU model type with no features enabled";
5286 static const TypeInfo x86_base_cpu_type_info = {
5287 .name = X86_CPU_TYPE_NAME("base"),
5288 .parent = TYPE_X86_CPU,
5289 .class_init = x86_cpu_base_class_init,
5292 static void x86_cpu_register_types(void)
5296 type_register_static(&x86_cpu_type_info);
5297 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5298 x86_register_cpudef_type(&builtin_x86_defs[i]);
5300 type_register_static(&max_x86_cpu_type_info);
5301 type_register_static(&x86_base_cpu_type_info);
5302 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5303 type_register_static(&host_x86_cpu_type_info);
5307 type_init(x86_cpu_register_types)