2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
24 #include "exec/exec-all.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/hvf.h"
27 #include "sysemu/cpus.h"
31 #include "qemu/error-report.h"
32 #include "qemu/option.h"
33 #include "qemu/config-file.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-visit-misc.h"
36 #include "qapi/qapi-visit-run-state.h"
37 #include "qapi/qmp/qdict.h"
38 #include "qapi/qmp/qerror.h"
39 #include "qapi/visitor.h"
40 #include "qom/qom-qobject.h"
41 #include "sysemu/arch_init.h"
43 #include "standard-headers/asm-x86/kvm_para.h"
45 #include "sysemu/sysemu.h"
46 #include "hw/qdev-properties.h"
47 #include "hw/i386/topology.h"
48 #ifndef CONFIG_USER_ONLY
49 #include "exec/address-spaces.h"
51 #include "hw/xen/xen.h"
52 #include "hw/i386/apic_internal.h"
55 #include "disas/capstone.h"
57 /* Helpers for building CPUID[2] descriptors: */
59 struct CPUID2CacheDescriptorInfo {
68 #define MiB (1024 * 1024)
71 * Known CPUID 2 cache descriptors.
72 * From Intel SDM Volume 2A, CPUID instruction
74 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
75 [0x06] = { .level = 1, .type = ICACHE, .size = 8 * KiB,
76 .associativity = 4, .line_size = 32, },
77 [0x08] = { .level = 1, .type = ICACHE, .size = 16 * KiB,
78 .associativity = 4, .line_size = 32, },
79 [0x09] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
80 .associativity = 4, .line_size = 64, },
81 [0x0A] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
82 .associativity = 2, .line_size = 32, },
83 [0x0C] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
84 .associativity = 4, .line_size = 32, },
85 [0x0D] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
86 .associativity = 4, .line_size = 64, },
87 [0x0E] = { .level = 1, .type = DCACHE, .size = 24 * KiB,
88 .associativity = 6, .line_size = 64, },
89 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
90 .associativity = 2, .line_size = 64, },
91 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
92 .associativity = 8, .line_size = 64, },
93 /* lines per sector is not supported by cpuid2_cache_descriptor(),
94 * so descriptors 0x22, 0x23 are not included
96 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
97 .associativity = 16, .line_size = 64, },
98 /* lines per sector is not supported by cpuid2_cache_descriptor(),
99 * so descriptors 0x25, 0x29 are not included
101 [0x2C] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
102 .associativity = 8, .line_size = 64, },
103 [0x30] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
104 .associativity = 8, .line_size = 64, },
105 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
106 .associativity = 4, .line_size = 32, },
107 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
108 .associativity = 4, .line_size = 32, },
109 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
112 .associativity = 4, .line_size = 32, },
113 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
114 .associativity = 4, .line_size = 32, },
115 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
116 .associativity = 4, .line_size = 64, },
117 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
118 .associativity = 8, .line_size = 64, },
119 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
120 .associativity = 12, .line_size = 64, },
121 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
122 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
123 .associativity = 12, .line_size = 64, },
124 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
125 .associativity = 16, .line_size = 64, },
126 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
131 .associativity = 24, .line_size = 64, },
132 [0x60] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
133 .associativity = 8, .line_size = 64, },
134 [0x66] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
135 .associativity = 4, .line_size = 64, },
136 [0x67] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
137 .associativity = 4, .line_size = 64, },
138 [0x68] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
141 .associativity = 4, .line_size = 64, },
142 /* lines per sector is not supported by cpuid2_cache_descriptor(),
143 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
145 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
146 .associativity = 8, .line_size = 64, },
147 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
148 .associativity = 2, .line_size = 64, },
149 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
150 .associativity = 8, .line_size = 64, },
151 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
152 .associativity = 8, .line_size = 32, },
153 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 32, },
155 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
156 .associativity = 8, .line_size = 32, },
157 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
158 .associativity = 8, .line_size = 32, },
159 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
160 .associativity = 4, .line_size = 64, },
161 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
162 .associativity = 8, .line_size = 64, },
163 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 4, .line_size = 64, },
167 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 8, .line_size = 64, },
171 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 8, .line_size = 64, },
173 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
176 .associativity = 12, .line_size = 64, },
177 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
178 .associativity = 12, .line_size = 64, },
179 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
182 .associativity = 16, .line_size = 64, },
183 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
184 .associativity = 16, .line_size = 64, },
185 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
188 .associativity = 24, .line_size = 64, },
189 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
190 .associativity = 24, .line_size = 64, },
191 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
192 .associativity = 24, .line_size = 64, },
196 * "CPUID leaf 2 does not report cache descriptor information,
197 * use CPUID leaf 4 to query cache parameters"
199 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
202 * Return a CPUID 2 cache descriptor for a given cache.
203 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
205 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
209 assert(cache->size > 0);
210 assert(cache->level > 0);
211 assert(cache->line_size > 0);
212 assert(cache->associativity > 0);
213 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
214 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
215 if (d->level == cache->level && d->type == cache->type &&
216 d->size == cache->size && d->line_size == cache->line_size &&
217 d->associativity == cache->associativity) {
222 return CACHE_DESCRIPTOR_UNAVAILABLE;
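/*
 * Illustrative note (not part of the lookup itself): a 16 KiB, 4-way
 * L1 data cache with 32-byte lines matches the 0x0C entry in
 * cpuid2_cache_descriptors[] above, so cpuid2_cache_descriptor()
 * returns 0x0C for it; a geometry with no matching entry falls through
 * to CACHE_DESCRIPTOR_UNAVAILABLE (0xFF).
 */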
225 /* CPUID Leaf 4 constants: */
228 #define CACHE_TYPE_D 1
229 #define CACHE_TYPE_I 2
230 #define CACHE_TYPE_UNIFIED 3
232 #define CACHE_LEVEL(l) (l << 5)
234 #define CACHE_SELF_INIT_LEVEL (1 << 8)
237 #define CACHE_NO_INVD_SHARING (1 << 0)
238 #define CACHE_INCLUSIVE (1 << 1)
239 #define CACHE_COMPLEX_IDX (1 << 2)
241 /* Encode CacheType for CPUID[4].EAX */
242 #define CACHE_TYPE(t) (((t) == DCACHE) ? CACHE_TYPE_D : \
243 ((t) == ICACHE) ? CACHE_TYPE_I : \
244 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
245 0 /* Invalid value */)
248 /* Encode cache info for CPUID[4] */
249 static void encode_cache_cpuid4(CPUCacheInfo *cache,
250 int num_apic_ids, int num_cores,
251 uint32_t *eax, uint32_t *ebx,
252 uint32_t *ecx, uint32_t *edx)
254 assert(cache->size == cache->line_size * cache->associativity *
255 cache->partitions * cache->sets);
257 assert(num_apic_ids > 0);
258 *eax = CACHE_TYPE(cache->type) |
259 CACHE_LEVEL(cache->level) |
260 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
261 ((num_cores - 1) << 26) |
262 ((num_apic_ids - 1) << 14);
264 assert(cache->line_size > 0);
265 assert(cache->partitions > 0);
266 assert(cache->associativity > 0);
267 /* We don't implement fully-associative caches */
268 assert(cache->associativity < cache->sets);
269 *ebx = (cache->line_size - 1) |
270 ((cache->partitions - 1) << 12) |
271 ((cache->associativity - 1) << 22);
273 assert(cache->sets > 0);
274 *ecx = cache->sets - 1;
276 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
277 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
278 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
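/*
 * Worked example (illustrative only): with 64-byte lines, 1 partition
 * and 8-way associativity, the EBX value computed above is
 *   (64 - 1) | ((1 - 1) << 12) | ((8 - 1) << 22) = 0x01C0003F,
 * i.e. ways-1 in bits 31..22, partitions-1 in bits 21..12 and
 * line_size-1 in bits 11..0.
 */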
281 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
282 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
284 assert(cache->size % 1024 == 0);
285 assert(cache->lines_per_tag > 0);
286 assert(cache->associativity > 0);
287 assert(cache->line_size > 0);
288 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
289 (cache->lines_per_tag << 8) | (cache->line_size);
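/*
 * Worked example (illustrative only): a 64 KiB, 2-way cache with one
 * line per tag and 64-byte lines encodes as
 *   (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140,
 * i.e. size in KiB in bits 31..24, associativity in bits 23..16,
 * lines per tag in bits 15..8 and line size in bits 7..0.
 */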
292 #define ASSOC_FULL 0xFF
294 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
295 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
305 a == ASSOC_FULL ? 0xF : \
306 0 /* invalid value */)
309 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
312 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
314 uint32_t *ecx, uint32_t *edx)
316 assert(l2->size % 1024 == 0);
317 assert(l2->associativity > 0);
318 assert(l2->lines_per_tag > 0);
319 assert(l2->line_size > 0);
320 *ecx = ((l2->size / 1024) << 16) |
321 (AMD_ENC_ASSOC(l2->associativity) << 12) |
322 (l2->lines_per_tag << 8) | (l2->line_size);
325 assert(l3->size % (512 * 1024) == 0);
326 assert(l3->associativity > 0);
327 assert(l3->lines_per_tag > 0);
328 assert(l3->line_size > 0);
329 *edx = ((l3->size / (512 * 1024)) << 18) |
330 (AMD_ENC_ASSOC(l3->associativity) << 12) |
331 (l3->lines_per_tag << 8) | (l3->line_size);
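/*
 * Worked example (illustrative, assuming the usual AMD associativity
 * table where 8-way encodes as 0x6): a 512 KiB, 8-way L2 with one line
 * per tag and 64-byte lines gives
 *   ECX = (512 << 16) | (0x6 << 12) | (1 << 8) | 64 = 0x02006140.
 * The L3 size field in EDX uses 512 KiB units, so a 16 MiB L3 stores
 * 32 in bits 31..18.
 */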
338 * Definitions of the hardcoded cache entries we expose:
339 * These are legacy cache values. If there is a need to change any
340 * of these values, please use builtin_x86_defs
344 static CPUCacheInfo legacy_l1d_cache = {
353 .no_invd_sharing = true,
356 /* FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
357 static CPUCacheInfo legacy_l1d_cache_amd = {
367 .no_invd_sharing = true,
370 /* L1 instruction cache: */
371 static CPUCacheInfo legacy_l1i_cache = {
380 .no_invd_sharing = true,
383 /* FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
384 static CPUCacheInfo legacy_l1i_cache_amd = {
394 .no_invd_sharing = true,
397 /* Level 2 unified cache: */
398 static CPUCacheInfo legacy_l2_cache = {
399 .type = UNIFIED_CACHE,
407 .no_invd_sharing = true,
410 /* FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
411 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
412 .type = UNIFIED_CACHE,
420 /* FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
421 static CPUCacheInfo legacy_l2_cache_amd = {
422 .type = UNIFIED_CACHE,
432 /* Level 3 unified cache: */
433 static CPUCacheInfo legacy_l3_cache = {
434 .type = UNIFIED_CACHE,
444 .complex_indexing = true,
447 /* TLB definitions: */
449 #define L1_DTLB_2M_ASSOC 1
450 #define L1_DTLB_2M_ENTRIES 255
451 #define L1_DTLB_4K_ASSOC 1
452 #define L1_DTLB_4K_ENTRIES 255
454 #define L1_ITLB_2M_ASSOC 1
455 #define L1_ITLB_2M_ENTRIES 255
456 #define L1_ITLB_4K_ASSOC 1
457 #define L1_ITLB_4K_ENTRIES 255
459 #define L2_DTLB_2M_ASSOC 0 /* disabled */
460 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
461 #define L2_DTLB_4K_ASSOC 4
462 #define L2_DTLB_4K_ENTRIES 512
464 #define L2_ITLB_2M_ASSOC 0 /* disabled */
465 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
466 #define L2_ITLB_4K_ASSOC 4
467 #define L2_ITLB_4K_ENTRIES 512
469 /* CPUID Leaf 0x14 constants: */
470 #define INTEL_PT_MAX_SUBLEAF 0x1
472 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
473 * MSR can be accessed;
474 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
475 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
476 * of Intel PT MSRs across warm reset;
477 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
479 #define INTEL_PT_MINIMAL_EBX 0xf
481 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
482 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
484 * bit[01]: ToPA tables can hold any number of output entries, up to the
485 * maximum allowed by the MaskOrTableOffset field of
486 * IA32_RTIT_OUTPUT_MASK_PTRS;
487 * bit[02]: Support Single-Range Output scheme;
489 #define INTEL_PT_MINIMAL_ECX 0x7
490 /* generated packets which contain IP payloads have LIP values */
491 #define INTEL_PT_IP_LIP (1 << 31)
492 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
493 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
494 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
495 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
496 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
498 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
499 uint32_t vendor2, uint32_t vendor3)
502 for (i = 0; i < 4; i++) {
503 dst[i] = vendor1 >> (8 * i);
504 dst[i + 4] = vendor2 >> (8 * i);
505 dst[i + 8] = vendor3 >> (8 * i);
507 dst[CPUID_VENDOR_SZ] = '\0';
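/*
 * Example (for illustration): a GenuineIntel CPU returns
 * EBX = 0x756e6547 ("Genu"), EDX = 0x49656e69 ("ineI") and
 * ECX = 0x6c65746e ("ntel") from CPUID[0]; passing them in that order
 * reconstructs the 12-byte "GenuineIntel" vendor string.
 */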
510 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
511 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
512 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
513 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
514 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
515 CPUID_PSE36 | CPUID_FXSR)
516 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
517 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
518 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
519 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
520 CPUID_PAE | CPUID_SEP | CPUID_APIC)
522 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
523 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
524 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
525 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
526 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
527 /* partly implemented:
528 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
530 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
531 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
532 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
533 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
534 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
535 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
537 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
538 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
539 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
540 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
541 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
544 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
546 #define TCG_EXT2_X86_64_FEATURES 0
549 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
550 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
551 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
552 TCG_EXT2_X86_64_FEATURES)
553 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
554 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
555 #define TCG_EXT4_FEATURES 0
556 #define TCG_SVM_FEATURES 0
557 #define TCG_KVM_FEATURES 0
558 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
559 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
560 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
561 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
564 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
565 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
566 CPUID_7_0_EBX_RDSEED */
567 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
569 #define TCG_7_0_EDX_FEATURES 0
570 #define TCG_APM_FEATURES 0
571 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
572 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
574 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
576 typedef struct FeatureWordInfo {
577 /* feature flag names are taken from "Intel Processor Identification and
578 * the CPUID Instruction" and AMD's "CPUID Specification".
579 * In cases of disagreement between feature naming conventions,
580 * aliases may be added.
582 const char *feat_names[32];
583 uint32_t cpuid_eax; /* Input EAX for CPUID */
584 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
585 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
586 int cpuid_reg; /* output register (R_* constant) */
587 uint32_t tcg_features; /* Feature flags supported by TCG */
588 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
589 uint32_t migratable_flags; /* Feature flags known to be migratable */
590 /* Features that shouldn't be auto-enabled by "-cpu host" */
591 uint32_t no_autoenable_flags;
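/*
 * Example (illustrative): the FEAT_7_0_EBX entry below describes the
 * flags reported in CPUID.(EAX=7,ECX=0):EBX, so it uses cpuid_eax = 7,
 * cpuid_needs_ecx = true, cpuid_ecx = 0 and cpuid_reg = R_EBX, and its
 * tcg_features mask limits which of those bits TCG reports.
 */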
594 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
597 "fpu", "vme", "de", "pse",
598 "tsc", "msr", "pae", "mce",
599 "cx8", "apic", NULL, "sep",
600 "mtrr", "pge", "mca", "cmov",
601 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
602 NULL, "ds" /* Intel dts */, "acpi", "mmx",
603 "fxsr", "sse", "sse2", "ss",
604 "ht" /* Intel htt */, "tm", "ia64", "pbe",
606 .cpuid_eax = 1, .cpuid_reg = R_EDX,
607 .tcg_features = TCG_FEATURES,
611 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
612 "ds-cpl", "vmx", "smx", "est",
613 "tm2", "ssse3", "cid", NULL,
614 "fma", "cx16", "xtpr", "pdcm",
615 NULL, "pcid", "dca", "sse4.1",
616 "sse4.2", "x2apic", "movbe", "popcnt",
617 "tsc-deadline", "aes", "xsave", "osxsave",
618 "avx", "f16c", "rdrand", "hypervisor",
620 .cpuid_eax = 1, .cpuid_reg = R_ECX,
621 .tcg_features = TCG_EXT_FEATURES,
623 /* Feature names that are already defined in feature_name[] and are
624 * also reported in CPUID[8000_0001].EDX on AMD CPUs are not repeated
625 * in feat_names below. They are copied automatically
626 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
628 [FEAT_8000_0001_EDX] = {
630 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
631 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
632 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
633 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
634 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
635 "nx", NULL, "mmxext", NULL /* mmx */,
636 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
637 NULL, "lm", "3dnowext", "3dnow",
639 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
640 .tcg_features = TCG_EXT2_FEATURES,
642 [FEAT_8000_0001_ECX] = {
644 "lahf-lm", "cmp-legacy", "svm", "extapic",
645 "cr8legacy", "abm", "sse4a", "misalignsse",
646 "3dnowprefetch", "osvw", "ibs", "xop",
647 "skinit", "wdt", NULL, "lwp",
648 "fma4", "tce", NULL, "nodeid-msr",
649 NULL, "tbm", "topoext", "perfctr-core",
650 "perfctr-nb", NULL, NULL, NULL,
651 NULL, NULL, NULL, NULL,
653 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
654 .tcg_features = TCG_EXT3_FEATURES,
656 [FEAT_C000_0001_EDX] = {
658 NULL, NULL, "xstore", "xstore-en",
659 NULL, NULL, "xcrypt", "xcrypt-en",
660 "ace2", "ace2-en", "phe", "phe-en",
661 "pmm", "pmm-en", NULL, NULL,
662 NULL, NULL, NULL, NULL,
663 NULL, NULL, NULL, NULL,
664 NULL, NULL, NULL, NULL,
665 NULL, NULL, NULL, NULL,
667 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
668 .tcg_features = TCG_EXT4_FEATURES,
672 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
673 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
674 NULL, "kvm-pv-tlb-flush", NULL, NULL,
675 NULL, NULL, NULL, NULL,
676 NULL, NULL, NULL, NULL,
677 NULL, NULL, NULL, NULL,
678 "kvmclock-stable-bit", NULL, NULL, NULL,
679 NULL, NULL, NULL, NULL,
681 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
682 .tcg_features = TCG_KVM_FEATURES,
686 "kvm-hint-dedicated", NULL, NULL, NULL,
687 NULL, NULL, NULL, NULL,
688 NULL, NULL, NULL, NULL,
689 NULL, NULL, NULL, NULL,
690 NULL, NULL, NULL, NULL,
691 NULL, NULL, NULL, NULL,
692 NULL, NULL, NULL, NULL,
693 NULL, NULL, NULL, NULL,
695 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
696 .tcg_features = TCG_KVM_FEATURES,
698 * KVM hints aren't auto-enabled by -cpu host; they need to be
699 * enabled explicitly on the command line.
701 .no_autoenable_flags = ~0U,
703 [FEAT_HYPERV_EAX] = {
705 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
706 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
707 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
708 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
709 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
710 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
711 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
713 NULL, NULL, NULL, NULL,
714 NULL, NULL, NULL, NULL,
715 NULL, NULL, NULL, NULL,
716 NULL, NULL, NULL, NULL,
718 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
720 [FEAT_HYPERV_EBX] = {
722 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
723 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
724 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
725 NULL /* hv_create_port */, NULL /* hv_connect_port */,
726 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
727 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
729 NULL, NULL, NULL, NULL,
730 NULL, NULL, NULL, NULL,
731 NULL, NULL, NULL, NULL,
732 NULL, NULL, NULL, NULL,
734 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
736 [FEAT_HYPERV_EDX] = {
738 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
739 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
740 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
742 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
743 NULL, NULL, NULL, NULL,
744 NULL, NULL, NULL, NULL,
745 NULL, NULL, NULL, NULL,
746 NULL, NULL, NULL, NULL,
747 NULL, NULL, NULL, NULL,
749 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
753 "npt", "lbrv", "svm-lock", "nrip-save",
754 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
755 NULL, NULL, "pause-filter", NULL,
756 "pfthreshold", NULL, NULL, NULL,
757 NULL, NULL, NULL, NULL,
758 NULL, NULL, NULL, NULL,
759 NULL, NULL, NULL, NULL,
760 NULL, NULL, NULL, NULL,
762 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
763 .tcg_features = TCG_SVM_FEATURES,
767 "fsgsbase", "tsc-adjust", NULL, "bmi1",
768 "hle", "avx2", NULL, "smep",
769 "bmi2", "erms", "invpcid", "rtm",
770 NULL, NULL, "mpx", NULL,
771 "avx512f", "avx512dq", "rdseed", "adx",
772 "smap", "avx512ifma", "pcommit", "clflushopt",
773 "clwb", "intel-pt", "avx512pf", "avx512er",
774 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
777 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
779 .tcg_features = TCG_7_0_EBX_FEATURES,
783 NULL, "avx512vbmi", "umip", "pku",
784 "ospke", NULL, "avx512vbmi2", NULL,
785 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
786 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
787 "la57", NULL, NULL, NULL,
788 NULL, NULL, "rdpid", NULL,
789 NULL, "cldemote", NULL, NULL,
790 NULL, NULL, NULL, NULL,
793 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
795 .tcg_features = TCG_7_0_ECX_FEATURES,
799 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
800 NULL, NULL, NULL, NULL,
801 NULL, NULL, NULL, NULL,
802 NULL, NULL, NULL, NULL,
803 NULL, NULL, NULL, NULL,
804 NULL, NULL, NULL, NULL,
805 NULL, NULL, "spec-ctrl", NULL,
806 NULL, NULL, NULL, "ssbd",
809 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
811 .tcg_features = TCG_7_0_EDX_FEATURES,
813 [FEAT_8000_0007_EDX] = {
815 NULL, NULL, NULL, NULL,
816 NULL, NULL, NULL, NULL,
817 "invtsc", NULL, NULL, NULL,
818 NULL, NULL, NULL, NULL,
819 NULL, NULL, NULL, NULL,
820 NULL, NULL, NULL, NULL,
821 NULL, NULL, NULL, NULL,
822 NULL, NULL, NULL, NULL,
824 .cpuid_eax = 0x80000007,
826 .tcg_features = TCG_APM_FEATURES,
827 .unmigratable_flags = CPUID_APM_INVTSC,
829 [FEAT_8000_0008_EBX] = {
831 NULL, NULL, NULL, NULL,
832 NULL, NULL, NULL, NULL,
833 NULL, NULL, NULL, NULL,
834 "ibpb", NULL, NULL, NULL,
835 NULL, NULL, NULL, NULL,
836 NULL, NULL, NULL, NULL,
837 NULL, "virt-ssbd", NULL, NULL,
838 NULL, NULL, NULL, NULL,
840 .cpuid_eax = 0x80000008,
843 .unmigratable_flags = 0,
847 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
848 NULL, NULL, NULL, NULL,
849 NULL, NULL, NULL, NULL,
850 NULL, NULL, NULL, NULL,
851 NULL, NULL, NULL, NULL,
852 NULL, NULL, NULL, NULL,
853 NULL, NULL, NULL, NULL,
854 NULL, NULL, NULL, NULL,
857 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
859 .tcg_features = TCG_XSAVE_FEATURES,
863 NULL, NULL, "arat", NULL,
864 NULL, NULL, NULL, NULL,
865 NULL, NULL, NULL, NULL,
866 NULL, NULL, NULL, NULL,
867 NULL, NULL, NULL, NULL,
868 NULL, NULL, NULL, NULL,
869 NULL, NULL, NULL, NULL,
870 NULL, NULL, NULL, NULL,
872 .cpuid_eax = 6, .cpuid_reg = R_EAX,
873 .tcg_features = TCG_6_EAX_FEATURES,
875 [FEAT_XSAVE_COMP_LO] = {
877 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
880 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
881 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
882 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
885 [FEAT_XSAVE_COMP_HI] = {
887 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
893 typedef struct X86RegisterInfo32 {
894 /* Name of register */
896 /* QAPI enum value register */
897 X86CPURegister32 qapi_enum;
900 #define REGISTER(reg) \
901 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
902 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
914 typedef struct ExtSaveArea {
915 uint32_t feature, bits;
916 uint32_t offset, size;
919 static const ExtSaveArea x86_ext_save_areas[] = {
921 /* x87 FP state component is always enabled if XSAVE is supported */
922 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
923 /* x87 state is in the legacy region of the XSAVE area */
925 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
928 /* SSE state component is always enabled if XSAVE is supported */
929 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
930 /* SSE state is in the legacy region of the XSAVE area */
932 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
935 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
936 .offset = offsetof(X86XSaveArea, avx_state),
937 .size = sizeof(XSaveAVX) },
938 [XSTATE_BNDREGS_BIT] =
939 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
940 .offset = offsetof(X86XSaveArea, bndreg_state),
941 .size = sizeof(XSaveBNDREG) },
942 [XSTATE_BNDCSR_BIT] =
943 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
944 .offset = offsetof(X86XSaveArea, bndcsr_state),
945 .size = sizeof(XSaveBNDCSR) },
946 [XSTATE_OPMASK_BIT] =
947 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
948 .offset = offsetof(X86XSaveArea, opmask_state),
949 .size = sizeof(XSaveOpmask) },
950 [XSTATE_ZMM_Hi256_BIT] =
951 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
952 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
953 .size = sizeof(XSaveZMM_Hi256) },
954 [XSTATE_Hi16_ZMM_BIT] =
955 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
956 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
957 .size = sizeof(XSaveHi16_ZMM) },
959 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
960 .offset = offsetof(X86XSaveArea, pkru_state),
961 .size = sizeof(XSavePKRU) },
964 static uint32_t xsave_area_size(uint64_t mask)
969 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
970 const ExtSaveArea *esa = &x86_ext_save_areas[i];
971 if ((mask >> i) & 1) {
972 ret = MAX(ret, esa->offset + esa->size);
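/*
 * Illustrative note: the size returned is the highest offset + size
 * over the components enabled in mask.  With only the legacy x87/SSE
 * bits set (mask = 3) that is sizeof(X86LegacyXSaveArea) +
 * sizeof(X86XSaveHeader), i.e. the minimum 512 + 64 = 576 byte XSAVE area.
 */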
978 static inline bool accel_uses_host_cpuid(void)
980 return kvm_enabled() || hvf_enabled();
983 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
985 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
986 cpu->env.features[FEAT_XSAVE_COMP_LO];
989 const char *get_register_name_32(unsigned int reg)
991 if (reg >= CPU_NB_REGS32) {
994 return x86_reg_info_32[reg].name;
998 * Returns the set of feature flags that are supported and migratable by
999 * QEMU, for a given FeatureWord.
1001 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1003 FeatureWordInfo *wi = &feature_word_info[w];
1007 for (i = 0; i < 32; i++) {
1008 uint32_t f = 1U << i;
1010 /* If the feature name is known, it is implicitly considered migratable,
1011 * unless it is explicitly set in unmigratable_flags */
1012 if ((wi->migratable_flags & f) ||
1013 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1020 void host_cpuid(uint32_t function, uint32_t count,
1021 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1026 asm volatile("cpuid"
1027 : "=a"(vec[0]), "=b"(vec[1]),
1028 "=c"(vec[2]), "=d"(vec[3])
1029 : "0"(function), "c"(count) : "cc");
1030 #elif defined(__i386__)
1031 asm volatile("pusha \n\t"
1033 "mov %%eax, 0(%2) \n\t"
1034 "mov %%ebx, 4(%2) \n\t"
1035 "mov %%ecx, 8(%2) \n\t"
1036 "mov %%edx, 12(%2) \n\t"
1038 : : "a"(function), "c"(count), "S"(vec)
1054 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1056 uint32_t eax, ebx, ecx, edx;
1058 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1059 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1061 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1063 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1066 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1069 *stepping = eax & 0x0F;
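/*
 * Worked example (illustrative only): with CPUID[1].EAX = 0x000306C3,
 *   family   = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF) = 6 + 0 = 6,
 *   model    = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12) = 0x0C | 0x30 = 0x3C,
 *   stepping = eax & 0x0F = 3,
 * showing how the extended family/model fields are folded into the
 * conventional numbers.
 */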
1073 /* CPU class name definitions: */
1075 /* Return type name for a given CPU model name
1076 * Caller is responsible for freeing the returned string.
1078 static char *x86_cpu_type_name(const char *model_name)
1080 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
1083 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1086 char *typename = x86_cpu_type_name(cpu_model);
1087 oc = object_class_by_name(typename);
1092 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1094 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1095 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1096 return g_strndup(class_name,
1097 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
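/*
 * For illustration (assuming the usual X86_CPU_TYPE_SUFFIX of
 * "-" TYPE_X86_CPU, e.g. "-x86_64-cpu" on a 64-bit target):
 * x86_cpu_type_name("Haswell") yields "Haswell-x86_64-cpu", and
 * x86_cpu_class_get_model_name() strips the suffix to recover
 * "Haswell".
 */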
1100 struct X86CPUDefinition {
1104 /* vendor is a zero-terminated, 12-character ASCII string */
1105 char vendor[CPUID_VENDOR_SZ + 1];
1109 FeatureWordArray features;
1110 const char *model_id;
1111 CPUCaches *cache_info;
1114 static CPUCaches epyc_cache_info = {
1115 .l1d_cache = &(CPUCacheInfo) {
1125 .no_invd_sharing = true,
1127 .l1i_cache = &(CPUCacheInfo) {
1137 .no_invd_sharing = true,
1139 .l2_cache = &(CPUCacheInfo) {
1140 .type = UNIFIED_CACHE,
1149 .l3_cache = &(CPUCacheInfo) {
1150 .type = UNIFIED_CACHE,
1154 .associativity = 16,
1160 .complex_indexing = true,
1164 static X86CPUDefinition builtin_x86_defs[] = {
1168 .vendor = CPUID_VENDOR_AMD,
1172 .features[FEAT_1_EDX] =
1174 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1176 .features[FEAT_1_ECX] =
1177 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1178 .features[FEAT_8000_0001_EDX] =
1179 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1180 .features[FEAT_8000_0001_ECX] =
1181 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1182 .xlevel = 0x8000000A,
1183 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1188 .vendor = CPUID_VENDOR_AMD,
1192 /* Missing: CPUID_HT */
1193 .features[FEAT_1_EDX] =
1195 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1196 CPUID_PSE36 | CPUID_VME,
1197 .features[FEAT_1_ECX] =
1198 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1200 .features[FEAT_8000_0001_EDX] =
1201 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1202 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1203 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1204 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1206 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1207 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1208 .features[FEAT_8000_0001_ECX] =
1209 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1210 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1211 /* Missing: CPUID_SVM_LBRV */
1212 .features[FEAT_SVM] =
1214 .xlevel = 0x8000001A,
1215 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1220 .vendor = CPUID_VENDOR_INTEL,
1224 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1225 .features[FEAT_1_EDX] =
1227 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1228 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1229 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1230 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1231 .features[FEAT_1_ECX] =
1232 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1234 .features[FEAT_8000_0001_EDX] =
1235 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1236 .features[FEAT_8000_0001_ECX] =
1238 .xlevel = 0x80000008,
1239 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1244 .vendor = CPUID_VENDOR_INTEL,
1248 /* Missing: CPUID_HT */
1249 .features[FEAT_1_EDX] =
1250 PPRO_FEATURES | CPUID_VME |
1251 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1253 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1254 .features[FEAT_1_ECX] =
1255 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1256 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1257 .features[FEAT_8000_0001_EDX] =
1258 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1259 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1260 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1261 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1262 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1263 .features[FEAT_8000_0001_ECX] =
1265 .xlevel = 0x80000008,
1266 .model_id = "Common KVM processor"
1271 .vendor = CPUID_VENDOR_INTEL,
1275 .features[FEAT_1_EDX] =
1277 .features[FEAT_1_ECX] =
1279 .xlevel = 0x80000004,
1280 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1285 .vendor = CPUID_VENDOR_INTEL,
1289 .features[FEAT_1_EDX] =
1290 PPRO_FEATURES | CPUID_VME |
1291 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1292 .features[FEAT_1_ECX] =
1294 .features[FEAT_8000_0001_ECX] =
1296 .xlevel = 0x80000008,
1297 .model_id = "Common 32-bit KVM processor"
1302 .vendor = CPUID_VENDOR_INTEL,
1306 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1307 .features[FEAT_1_EDX] =
1308 PPRO_FEATURES | CPUID_VME |
1309 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1311 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1312 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1313 .features[FEAT_1_ECX] =
1314 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1315 .features[FEAT_8000_0001_EDX] =
1317 .xlevel = 0x80000008,
1318 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1323 .vendor = CPUID_VENDOR_INTEL,
1327 .features[FEAT_1_EDX] =
1335 .vendor = CPUID_VENDOR_INTEL,
1339 .features[FEAT_1_EDX] =
1347 .vendor = CPUID_VENDOR_INTEL,
1351 .features[FEAT_1_EDX] =
1359 .vendor = CPUID_VENDOR_INTEL,
1363 .features[FEAT_1_EDX] =
1371 .vendor = CPUID_VENDOR_AMD,
1375 .features[FEAT_1_EDX] =
1376 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1378 .features[FEAT_8000_0001_EDX] =
1379 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1380 .xlevel = 0x80000008,
1381 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1386 .vendor = CPUID_VENDOR_INTEL,
1390 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1391 .features[FEAT_1_EDX] =
1393 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1394 CPUID_ACPI | CPUID_SS,
1395 /* Some CPUs have no CPUID_SEP */
1396 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1398 .features[FEAT_1_ECX] =
1399 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1401 .features[FEAT_8000_0001_EDX] =
1403 .features[FEAT_8000_0001_ECX] =
1405 .xlevel = 0x80000008,
1406 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1411 .vendor = CPUID_VENDOR_INTEL,
1415 .features[FEAT_1_EDX] =
1416 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1417 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1418 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1419 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1420 CPUID_DE | CPUID_FP87,
1421 .features[FEAT_1_ECX] =
1422 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1423 .features[FEAT_8000_0001_EDX] =
1424 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1425 .features[FEAT_8000_0001_ECX] =
1427 .xlevel = 0x80000008,
1428 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1433 .vendor = CPUID_VENDOR_INTEL,
1437 .features[FEAT_1_EDX] =
1438 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1439 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1440 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1441 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1442 CPUID_DE | CPUID_FP87,
1443 .features[FEAT_1_ECX] =
1444 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1446 .features[FEAT_8000_0001_EDX] =
1447 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1448 .features[FEAT_8000_0001_ECX] =
1450 .xlevel = 0x80000008,
1451 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1456 .vendor = CPUID_VENDOR_INTEL,
1460 .features[FEAT_1_EDX] =
1461 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1462 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1463 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1464 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1465 CPUID_DE | CPUID_FP87,
1466 .features[FEAT_1_ECX] =
1467 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1468 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1469 .features[FEAT_8000_0001_EDX] =
1470 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1471 .features[FEAT_8000_0001_ECX] =
1473 .xlevel = 0x80000008,
1474 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1477 .name = "Nehalem-IBRS",
1479 .vendor = CPUID_VENDOR_INTEL,
1483 .features[FEAT_1_EDX] =
1484 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1485 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1486 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1487 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1488 CPUID_DE | CPUID_FP87,
1489 .features[FEAT_1_ECX] =
1490 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1491 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1492 .features[FEAT_7_0_EDX] =
1493 CPUID_7_0_EDX_SPEC_CTRL,
1494 .features[FEAT_8000_0001_EDX] =
1495 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1496 .features[FEAT_8000_0001_ECX] =
1498 .xlevel = 0x80000008,
1499 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1504 .vendor = CPUID_VENDOR_INTEL,
1508 .features[FEAT_1_EDX] =
1509 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1510 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1511 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1512 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1513 CPUID_DE | CPUID_FP87,
1514 .features[FEAT_1_ECX] =
1515 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1516 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1517 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1518 .features[FEAT_8000_0001_EDX] =
1519 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1520 .features[FEAT_8000_0001_ECX] =
1522 .features[FEAT_6_EAX] =
1524 .xlevel = 0x80000008,
1525 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1528 .name = "Westmere-IBRS",
1530 .vendor = CPUID_VENDOR_INTEL,
1534 .features[FEAT_1_EDX] =
1535 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1536 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1537 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1538 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1539 CPUID_DE | CPUID_FP87,
1540 .features[FEAT_1_ECX] =
1541 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1542 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1543 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1544 .features[FEAT_8000_0001_EDX] =
1545 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1546 .features[FEAT_8000_0001_ECX] =
1548 .features[FEAT_7_0_EDX] =
1549 CPUID_7_0_EDX_SPEC_CTRL,
1550 .features[FEAT_6_EAX] =
1552 .xlevel = 0x80000008,
1553 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1556 .name = "SandyBridge",
1558 .vendor = CPUID_VENDOR_INTEL,
1562 .features[FEAT_1_EDX] =
1563 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1564 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1565 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1566 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1567 CPUID_DE | CPUID_FP87,
1568 .features[FEAT_1_ECX] =
1569 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1570 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1571 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1572 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1574 .features[FEAT_8000_0001_EDX] =
1575 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1577 .features[FEAT_8000_0001_ECX] =
1579 .features[FEAT_XSAVE] =
1580 CPUID_XSAVE_XSAVEOPT,
1581 .features[FEAT_6_EAX] =
1583 .xlevel = 0x80000008,
1584 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1587 .name = "SandyBridge-IBRS",
1589 .vendor = CPUID_VENDOR_INTEL,
1593 .features[FEAT_1_EDX] =
1594 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1595 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1596 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1597 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1598 CPUID_DE | CPUID_FP87,
1599 .features[FEAT_1_ECX] =
1600 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1601 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1602 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1603 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1605 .features[FEAT_8000_0001_EDX] =
1606 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1608 .features[FEAT_8000_0001_ECX] =
1610 .features[FEAT_7_0_EDX] =
1611 CPUID_7_0_EDX_SPEC_CTRL,
1612 .features[FEAT_XSAVE] =
1613 CPUID_XSAVE_XSAVEOPT,
1614 .features[FEAT_6_EAX] =
1616 .xlevel = 0x80000008,
1617 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1620 .name = "IvyBridge",
1622 .vendor = CPUID_VENDOR_INTEL,
1626 .features[FEAT_1_EDX] =
1627 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1628 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1629 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1630 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1631 CPUID_DE | CPUID_FP87,
1632 .features[FEAT_1_ECX] =
1633 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1634 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1635 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1636 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1637 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1638 .features[FEAT_7_0_EBX] =
1639 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1641 .features[FEAT_8000_0001_EDX] =
1642 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1644 .features[FEAT_8000_0001_ECX] =
1646 .features[FEAT_XSAVE] =
1647 CPUID_XSAVE_XSAVEOPT,
1648 .features[FEAT_6_EAX] =
1650 .xlevel = 0x80000008,
1651 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1654 .name = "IvyBridge-IBRS",
1656 .vendor = CPUID_VENDOR_INTEL,
1660 .features[FEAT_1_EDX] =
1661 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1662 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1663 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1664 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1665 CPUID_DE | CPUID_FP87,
1666 .features[FEAT_1_ECX] =
1667 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1668 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1669 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1670 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1671 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1672 .features[FEAT_7_0_EBX] =
1673 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1675 .features[FEAT_8000_0001_EDX] =
1676 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1678 .features[FEAT_8000_0001_ECX] =
1680 .features[FEAT_7_0_EDX] =
1681 CPUID_7_0_EDX_SPEC_CTRL,
1682 .features[FEAT_XSAVE] =
1683 CPUID_XSAVE_XSAVEOPT,
1684 .features[FEAT_6_EAX] =
1686 .xlevel = 0x80000008,
1687 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1690 .name = "Haswell-noTSX",
1692 .vendor = CPUID_VENDOR_INTEL,
1696 .features[FEAT_1_EDX] =
1697 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1698 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1699 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1700 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1701 CPUID_DE | CPUID_FP87,
1702 .features[FEAT_1_ECX] =
1703 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1704 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1705 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1706 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1707 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1708 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1709 .features[FEAT_8000_0001_EDX] =
1710 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1712 .features[FEAT_8000_0001_ECX] =
1713 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1714 .features[FEAT_7_0_EBX] =
1715 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1716 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1717 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1718 .features[FEAT_XSAVE] =
1719 CPUID_XSAVE_XSAVEOPT,
1720 .features[FEAT_6_EAX] =
1722 .xlevel = 0x80000008,
1723 .model_id = "Intel Core Processor (Haswell, no TSX)",
1726 .name = "Haswell-noTSX-IBRS",
1728 .vendor = CPUID_VENDOR_INTEL,
1732 .features[FEAT_1_EDX] =
1733 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1734 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1735 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1736 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1737 CPUID_DE | CPUID_FP87,
1738 .features[FEAT_1_ECX] =
1739 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1740 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1741 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1742 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1743 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1744 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1745 .features[FEAT_8000_0001_EDX] =
1746 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1748 .features[FEAT_8000_0001_ECX] =
1749 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1750 .features[FEAT_7_0_EDX] =
1751 CPUID_7_0_EDX_SPEC_CTRL,
1752 .features[FEAT_7_0_EBX] =
1753 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1754 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1755 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1756 .features[FEAT_XSAVE] =
1757 CPUID_XSAVE_XSAVEOPT,
1758 .features[FEAT_6_EAX] =
1760 .xlevel = 0x80000008,
1761 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1766 .vendor = CPUID_VENDOR_INTEL,
1770 .features[FEAT_1_EDX] =
1771 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1772 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1773 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1774 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1775 CPUID_DE | CPUID_FP87,
1776 .features[FEAT_1_ECX] =
1777 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1778 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1779 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1780 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1781 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1782 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1783 .features[FEAT_8000_0001_EDX] =
1784 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1786 .features[FEAT_8000_0001_ECX] =
1787 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1788 .features[FEAT_7_0_EBX] =
1789 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1790 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1791 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1793 .features[FEAT_XSAVE] =
1794 CPUID_XSAVE_XSAVEOPT,
1795 .features[FEAT_6_EAX] =
1797 .xlevel = 0x80000008,
1798 .model_id = "Intel Core Processor (Haswell)",
1801 .name = "Haswell-IBRS",
1803 .vendor = CPUID_VENDOR_INTEL,
1807 .features[FEAT_1_EDX] =
1808 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1809 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1810 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1811 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1812 CPUID_DE | CPUID_FP87,
1813 .features[FEAT_1_ECX] =
1814 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1815 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1816 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1817 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1818 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1819 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1820 .features[FEAT_8000_0001_EDX] =
1821 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1823 .features[FEAT_8000_0001_ECX] =
1824 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1825 .features[FEAT_7_0_EDX] =
1826 CPUID_7_0_EDX_SPEC_CTRL,
1827 .features[FEAT_7_0_EBX] =
1828 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1829 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1830 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1832 .features[FEAT_XSAVE] =
1833 CPUID_XSAVE_XSAVEOPT,
1834 .features[FEAT_6_EAX] =
1836 .xlevel = 0x80000008,
1837 .model_id = "Intel Core Processor (Haswell, IBRS)",
1840 .name = "Broadwell-noTSX",
1842 .vendor = CPUID_VENDOR_INTEL,
1846 .features[FEAT_1_EDX] =
1847 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1848 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1849 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1850 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1851 CPUID_DE | CPUID_FP87,
1852 .features[FEAT_1_ECX] =
1853 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1854 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1855 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1856 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1857 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1858 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1859 .features[FEAT_8000_0001_EDX] =
1860 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1862 .features[FEAT_8000_0001_ECX] =
1863 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1864 .features[FEAT_7_0_EBX] =
1865 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1866 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1867 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1868 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1870 .features[FEAT_XSAVE] =
1871 CPUID_XSAVE_XSAVEOPT,
1872 .features[FEAT_6_EAX] =
1874 .xlevel = 0x80000008,
1875 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1878 .name = "Broadwell-noTSX-IBRS",
1880 .vendor = CPUID_VENDOR_INTEL,
1884 .features[FEAT_1_EDX] =
1885 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1886 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1887 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1888 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1889 CPUID_DE | CPUID_FP87,
1890 .features[FEAT_1_ECX] =
1891 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1892 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1893 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1894 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1895 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1896 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1897 .features[FEAT_8000_0001_EDX] =
1898 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1900 .features[FEAT_8000_0001_ECX] =
1901 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1902 .features[FEAT_7_0_EDX] =
1903 CPUID_7_0_EDX_SPEC_CTRL,
1904 .features[FEAT_7_0_EBX] =
1905 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1906 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1907 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1908 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1910 .features[FEAT_XSAVE] =
1911 CPUID_XSAVE_XSAVEOPT,
1912 .features[FEAT_6_EAX] =
1914 .xlevel = 0x80000008,
1915 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
1918 .name = "Broadwell",
1920 .vendor = CPUID_VENDOR_INTEL,
1924 .features[FEAT_1_EDX] =
1925 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1926 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1927 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1928 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1929 CPUID_DE | CPUID_FP87,
1930 .features[FEAT_1_ECX] =
1931 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1932 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1933 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1934 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1935 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1936 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1937 .features[FEAT_8000_0001_EDX] =
1938 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1940 .features[FEAT_8000_0001_ECX] =
1941 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1942 .features[FEAT_7_0_EBX] =
1943 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1944 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1945 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1946 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1948 .features[FEAT_XSAVE] =
1949 CPUID_XSAVE_XSAVEOPT,
1950 .features[FEAT_6_EAX] =
1952 .xlevel = 0x80000008,
1953 .model_id = "Intel Core Processor (Broadwell)",
1956 .name = "Broadwell-IBRS",
1958 .vendor = CPUID_VENDOR_INTEL,
1962 .features[FEAT_1_EDX] =
1963 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1964 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1965 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1966 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1967 CPUID_DE | CPUID_FP87,
1968 .features[FEAT_1_ECX] =
1969 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1970 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1971 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1972 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1973 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1974 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1975 .features[FEAT_8000_0001_EDX] =
1976 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1978 .features[FEAT_8000_0001_ECX] =
1979 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1980 .features[FEAT_7_0_EDX] =
1981 CPUID_7_0_EDX_SPEC_CTRL,
1982 .features[FEAT_7_0_EBX] =
1983 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1984 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1985 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1986 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1988 .features[FEAT_XSAVE] =
1989 CPUID_XSAVE_XSAVEOPT,
1990 .features[FEAT_6_EAX] =
1992 .xlevel = 0x80000008,
1993 .model_id = "Intel Core Processor (Broadwell, IBRS)",
1996 .name = "Skylake-Client",
1998 .vendor = CPUID_VENDOR_INTEL,
2002 .features[FEAT_1_EDX] =
2003 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2004 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2005 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2006 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2007 CPUID_DE | CPUID_FP87,
2008 .features[FEAT_1_ECX] =
2009 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2010 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2011 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2012 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2013 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2014 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2015 .features[FEAT_8000_0001_EDX] =
2016 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2018 .features[FEAT_8000_0001_ECX] =
2019 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2020 .features[FEAT_7_0_EBX] =
2021 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2022 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2023 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2024 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2025 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2026 /* Missing: XSAVES (not supported by some Linux versions,
2027 * including v4.1 to v4.12).
2028 * KVM doesn't yet expose any XSAVES state save component,
2029 * and the only one defined in Skylake (processor tracing)
2030 * probably will block migration anyway.
2032 .features[FEAT_XSAVE] =
2033 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2034 CPUID_XSAVE_XGETBV1,
2035 .features[FEAT_6_EAX] =
2037 .xlevel = 0x80000008,
2038 .model_id = "Intel Core Processor (Skylake)",
2041 .name = "Skylake-Client-IBRS",
2043 .vendor = CPUID_VENDOR_INTEL,
2047 .features[FEAT_1_EDX] =
2048 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2049 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2050 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2051 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2052 CPUID_DE | CPUID_FP87,
2053 .features[FEAT_1_ECX] =
2054 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2055 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2056 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2057 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2058 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2059 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2060 .features[FEAT_8000_0001_EDX] =
2061 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2063 .features[FEAT_8000_0001_ECX] =
2064 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2065 .features[FEAT_7_0_EDX] =
2066 CPUID_7_0_EDX_SPEC_CTRL,
2067 .features[FEAT_7_0_EBX] =
2068 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2069 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2070 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2071 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2072 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2073 /* Missing: XSAVES (not supported by some Linux versions,
2074 * including v4.1 to v4.12).
2075 * KVM doesn't yet expose any XSAVES state save component,
2076 * and the only one defined in Skylake (processor tracing)
2077 * probably will block migration anyway.
2079 .features[FEAT_XSAVE] =
2080 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2081 CPUID_XSAVE_XGETBV1,
2082 .features[FEAT_6_EAX] =
2084 .xlevel = 0x80000008,
2085 .model_id = "Intel Core Processor (Skylake, IBRS)",
2088 .name = "Skylake-Server",
2090 .vendor = CPUID_VENDOR_INTEL,
2094 .features[FEAT_1_EDX] =
2095 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2096 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2097 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2098 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2099 CPUID_DE | CPUID_FP87,
2100 .features[FEAT_1_ECX] =
2101 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2102 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2103 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2104 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2105 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2106 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2107 .features[FEAT_8000_0001_EDX] =
2108 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2109 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2110 .features[FEAT_8000_0001_ECX] =
2111 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2112 .features[FEAT_7_0_EBX] =
2113 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2114 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2115 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2116 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2117 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2118 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2119 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2120 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2121 /* Missing: XSAVES (not supported by some Linux versions,
2122 * including v4.1 to v4.12).
2123 * KVM doesn't yet expose any XSAVES state save component,
2124 * and the only one defined in Skylake (processor tracing)
2125 * probably will block migration anyway.
2127 .features[FEAT_XSAVE] =
2128 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2129 CPUID_XSAVE_XGETBV1,
2130 .features[FEAT_6_EAX] =
2132 .xlevel = 0x80000008,
2133 .model_id = "Intel Xeon Processor (Skylake)",
2136 .name = "Skylake-Server-IBRS",
2138 .vendor = CPUID_VENDOR_INTEL,
2142 .features[FEAT_1_EDX] =
2143 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2144 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2145 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2146 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2147 CPUID_DE | CPUID_FP87,
2148 .features[FEAT_1_ECX] =
2149 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2150 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2151 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2152 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2153 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2154 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2155 .features[FEAT_8000_0001_EDX] =
2156 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2157 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2158 .features[FEAT_8000_0001_ECX] =
2159 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2160 .features[FEAT_7_0_EDX] =
2161 CPUID_7_0_EDX_SPEC_CTRL,
2162 .features[FEAT_7_0_EBX] =
2163 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2164 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2165 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2166 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2167 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2168 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2169 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2170 CPUID_7_0_EBX_AVX512VL,
2171 /* Missing: XSAVES (not supported by some Linux versions,
2172 * including v4.1 to v4.12).
2173 * KVM doesn't yet expose any XSAVES state save component,
2174 * and the only one defined in Skylake (processor tracing)
2175 * probably will block migration anyway.
2177 .features[FEAT_XSAVE] =
2178 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2179 CPUID_XSAVE_XGETBV1,
2180 .features[FEAT_6_EAX] =
2182 .xlevel = 0x80000008,
2183 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2186 .name = "KnightsMill",
2188 .vendor = CPUID_VENDOR_INTEL,
2192 .features[FEAT_1_EDX] =
2193 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2194 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2195 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2196 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2197 CPUID_PSE | CPUID_DE | CPUID_FP87,
2198 .features[FEAT_1_ECX] =
2199 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2200 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2201 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2202 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2203 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2204 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2205 .features[FEAT_8000_0001_EDX] =
2206 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2207 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2208 .features[FEAT_8000_0001_ECX] =
2209 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2210 .features[FEAT_7_0_EBX] =
2211 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2212 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2213 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2214 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2215 CPUID_7_0_EBX_AVX512ER,
2216 .features[FEAT_7_0_ECX] =
2217 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2218 .features[FEAT_7_0_EDX] =
2219 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2220 .features[FEAT_XSAVE] =
2221 CPUID_XSAVE_XSAVEOPT,
2222 .features[FEAT_6_EAX] =
2224 .xlevel = 0x80000008,
2225 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2228 .name = "Opteron_G1",
2230 .vendor = CPUID_VENDOR_AMD,
2234 .features[FEAT_1_EDX] =
2235 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2236 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2237 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2238 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2239 CPUID_DE | CPUID_FP87,
2240 .features[FEAT_1_ECX] =
2242 .features[FEAT_8000_0001_EDX] =
2243 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2244 .xlevel = 0x80000008,
2245 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2248 .name = "Opteron_G2",
2250 .vendor = CPUID_VENDOR_AMD,
2254 .features[FEAT_1_EDX] =
2255 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2256 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2257 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2258 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2259 CPUID_DE | CPUID_FP87,
2260 .features[FEAT_1_ECX] =
2261 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2262 /* Missing: CPUID_EXT2_RDTSCP */
2263 .features[FEAT_8000_0001_EDX] =
2264 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2265 .features[FEAT_8000_0001_ECX] =
2266 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2267 .xlevel = 0x80000008,
2268 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2271 .name = "Opteron_G3",
2273 .vendor = CPUID_VENDOR_AMD,
2277 .features[FEAT_1_EDX] =
2278 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2279 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2280 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2281 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2282 CPUID_DE | CPUID_FP87,
2283 .features[FEAT_1_ECX] =
2284 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2286 /* Missing: CPUID_EXT2_RDTSCP */
2287 .features[FEAT_8000_0001_EDX] =
2288 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2289 .features[FEAT_8000_0001_ECX] =
2290 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2291 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2292 .xlevel = 0x80000008,
2293 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2296 .name = "Opteron_G4",
2298 .vendor = CPUID_VENDOR_AMD,
2302 .features[FEAT_1_EDX] =
2303 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2304 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2305 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2306 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2307 CPUID_DE | CPUID_FP87,
2308 .features[FEAT_1_ECX] =
2309 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2310 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2311 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2313 /* Missing: CPUID_EXT2_RDTSCP */
2314 .features[FEAT_8000_0001_EDX] =
2315 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2317 .features[FEAT_8000_0001_ECX] =
2318 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2319 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2320 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2323 .xlevel = 0x8000001A,
2324 .model_id = "AMD Opteron 62xx class CPU",
2327 .name = "Opteron_G5",
2329 .vendor = CPUID_VENDOR_AMD,
2333 .features[FEAT_1_EDX] =
2334 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2335 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2336 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2337 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2338 CPUID_DE | CPUID_FP87,
2339 .features[FEAT_1_ECX] =
2340 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2341 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2342 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2343 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2344 /* Missing: CPUID_EXT2_RDTSCP */
2345 .features[FEAT_8000_0001_EDX] =
2346 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2348 .features[FEAT_8000_0001_ECX] =
2349 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2350 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2351 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2354 .xlevel = 0x8000001A,
2355 .model_id = "AMD Opteron 63xx class CPU",
2360 .vendor = CPUID_VENDOR_AMD,
2364 .features[FEAT_1_EDX] =
2365 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2366 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2367 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2368 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2369 CPUID_VME | CPUID_FP87,
2370 .features[FEAT_1_ECX] =
2371 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2372 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2373 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2374 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2375 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2376 .features[FEAT_8000_0001_EDX] =
2377 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2378 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2380 .features[FEAT_8000_0001_ECX] =
2381 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2382 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2383 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2384 .features[FEAT_7_0_EBX] =
2385 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2386 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2387 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2388 CPUID_7_0_EBX_SHA_NI,
2389 /* Missing: XSAVES (not supported by some Linux versions,
2390 * including v4.1 to v4.12).
2391 * KVM doesn't yet expose any XSAVES state save component.
2393 .features[FEAT_XSAVE] =
2394 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2395 CPUID_XSAVE_XGETBV1,
2396 .features[FEAT_6_EAX] =
2398 .xlevel = 0x8000000A,
2399 .model_id = "AMD EPYC Processor",
2400 .cache_info = &epyc_cache_info,
2403 .name = "EPYC-IBPB",
2405 .vendor = CPUID_VENDOR_AMD,
2409 .features[FEAT_1_EDX] =
2410 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2411 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2412 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2413 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2414 CPUID_VME | CPUID_FP87,
2415 .features[FEAT_1_ECX] =
2416 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2417 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2418 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2419 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2420 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2421 .features[FEAT_8000_0001_EDX] =
2422 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2423 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2425 .features[FEAT_8000_0001_ECX] =
2426 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2427 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2428 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2429 .features[FEAT_8000_0008_EBX] =
2430 CPUID_8000_0008_EBX_IBPB,
2431 .features[FEAT_7_0_EBX] =
2432 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2433 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2434 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2435 CPUID_7_0_EBX_SHA_NI,
2436 /* Missing: XSAVES (not supported by some Linux versions,
2437 * including v4.1 to v4.12).
2438 * KVM doesn't yet expose any XSAVES state save component.
2440 .features[FEAT_XSAVE] =
2441 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2442 CPUID_XSAVE_XGETBV1,
2443 .features[FEAT_6_EAX] =
2445 .xlevel = 0x8000000A,
2446 .model_id = "AMD EPYC Processor (with IBPB)",
2447 .cache_info = &epyc_cache_info,
2451 typedef struct PropValue {
2452 const char *prop, *value;
2455 /* KVM-specific features that are automatically added/removed
2456 * from all CPU models when KVM is enabled.
2458 static PropValue kvm_default_props[] = {
2459 { "kvmclock", "on" },
2460 { "kvm-nopiodelay", "on" },
2461 { "kvm-asyncpf", "on" },
2462 { "kvm-steal-time", "on" },
2463 { "kvm-pv-eoi", "on" },
2464 { "kvmclock-stable-bit", "on" },
2467 { "monitor", "off" },
2472 /* TCG-specific defaults that override all CPU models when using TCG
2474 static PropValue tcg_default_props[] = {
2480 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2483 for (pv = kvm_default_props; pv->prop; pv++) {
2484 if (!strcmp(pv->prop, prop)) {
2490 /* It is valid to call this function only for properties that
2491 * are already present in the kvm_default_props table.
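/*
 * Usage sketch (hypothetical caller, for illustration only): a machine-type
 * compat hook could run
 *     x86_cpu_change_kvm_default("kvm-pv-eoi", "off");
 * before CPUs are created, so every CPU model would then default to
 * kvm-pv-eoi=off under KVM, while an explicit -cpu ...,kvm-pv-eoi=on on the
 * command line still overrides the default.
 */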
2496 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2497 bool migratable_only);
2499 static bool lmce_supported(void)
2501 uint64_t mce_cap = 0;
2504 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2509 return !!(mce_cap & MCG_LMCE_P);
2512 #define CPUID_MODEL_ID_SZ 48
2515 * cpu_x86_fill_model_id:
2516 * Get CPUID model ID string from host CPU.
2518 * @str should have at least CPUID_MODEL_ID_SZ bytes
2520 * The function does NOT add a null terminator to the string
2523 static int cpu_x86_fill_model_id(char *str)
2525 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2528 for (i = 0; i < 3; i++) {
2529 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2530 memcpy(str + i * 16 + 0, &eax, 4);
2531 memcpy(str + i * 16 + 4, &ebx, 4);
2532 memcpy(str + i * 16 + 8, &ecx, 4);
2533 memcpy(str + i * 16 + 12, &edx, 4);
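/*
 * For illustration: leaves 0x80000002..0x80000004 each return 16 bytes of
 * the 48-byte brand string in EAX/EBX/ECX/EDX. E.g. on a host reporting
 * "Intel(R) Xeon(R) ..." the first iteration copies "Inte", "l(R)", " Xeo"
 * and "n(R)" into str[0..15].
 */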
2538 static Property max_x86_cpu_properties[] = {
2539 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2540 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2541 DEFINE_PROP_END_OF_LIST()
2544 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2546 DeviceClass *dc = DEVICE_CLASS(oc);
2547 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2551 xcc->model_description =
2552 "Enables all features supported by the accelerator in the current host";
2554 dc->props = max_x86_cpu_properties;
2557 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2559 static void max_x86_cpu_initfn(Object *obj)
2561 X86CPU *cpu = X86_CPU(obj);
2562 CPUX86State *env = &cpu->env;
2563 KVMState *s = kvm_state;
2565 /* We can't fill the features array here because we don't know yet if
2566 * "migratable" is true or false.
2568 cpu->max_features = true;
2570 if (accel_uses_host_cpuid()) {
2571 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2572 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2573 int family, model, stepping;
2574 X86CPUDefinition host_cpudef = { };
2575 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2577 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2578 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2580 host_vendor_fms(vendor, &family, &model, &stepping);
2582 cpu_x86_fill_model_id(model_id);
2584 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2585 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2586 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2587 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2589 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2592 if (kvm_enabled()) {
2593 env->cpuid_min_level =
2594 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2595 env->cpuid_min_xlevel =
2596 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2597 env->cpuid_min_xlevel2 =
2598 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2600 env->cpuid_min_level =
2601 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2602 env->cpuid_min_xlevel =
2603 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2604 env->cpuid_min_xlevel2 =
2605 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2608 if (lmce_supported()) {
2609 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2612 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2613 "vendor", &error_abort);
2614 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2615 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2616 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2617 object_property_set_str(OBJECT(cpu),
2618 "QEMU TCG CPU version " QEMU_HW_VERSION,
2619 "model-id", &error_abort);
2622 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
2625 static const TypeInfo max_x86_cpu_type_info = {
2626 .name = X86_CPU_TYPE_NAME("max"),
2627 .parent = TYPE_X86_CPU,
2628 .instance_init = max_x86_cpu_initfn,
2629 .class_init = max_x86_cpu_class_init,
2632 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2633 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2635 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2637 xcc->host_cpuid_required = true;
2640 if (kvm_enabled()) {
2641 xcc->model_description =
2642 "KVM processor with all supported host features ";
2643 } else if (hvf_enabled()) {
2644 xcc->model_description =
2645 "HVF processor with all supported host features ";
2649 static const TypeInfo host_x86_cpu_type_info = {
2650 .name = X86_CPU_TYPE_NAME("host"),
2651 .parent = X86_CPU_TYPE_NAME("max"),
2652 .class_init = host_x86_cpu_class_init,
2657 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2659 FeatureWordInfo *f = &feature_word_info[w];
2662 for (i = 0; i < 32; ++i) {
2663 if ((1UL << i) & mask) {
2664 const char *reg = get_register_name_32(f->cpuid_reg);
2666 warn_report("%s doesn't support requested feature: "
2667 "CPUID.%02XH:%s%s%s [bit %d]",
2668 accel_uses_host_cpuid() ? "host" : "TCG",
2670 f->feat_names[i] ? "." : "",
2671 f->feat_names[i] ? f->feat_names[i] : "", i);
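/*
 * Example output (sketch): filtering HLE on a host without TSX produces
 * "host doesn't support requested feature: CPUID.07H:EBX.hle [bit 4]".
 */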
2676 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2677 const char *name, void *opaque,
2680 X86CPU *cpu = X86_CPU(obj);
2681 CPUX86State *env = &cpu->env;
2684 value = (env->cpuid_version >> 8) & 0xf;
2686 value += (env->cpuid_version >> 20) & 0xff;
2688 visit_type_int(v, name, &value, errp);
2691 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2692 const char *name, void *opaque,
2695 X86CPU *cpu = X86_CPU(obj);
2696 CPUX86State *env = &cpu->env;
2697 const int64_t min = 0;
2698 const int64_t max = 0xff + 0xf;
2699 Error *local_err = NULL;
2702 visit_type_int(v, name, &value, &local_err);
2704 error_propagate(errp, local_err);
2707 if (value < min || value > max) {
2708 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2709 name ? name : "null", value, min, max);
2713 env->cpuid_version &= ~0xff00f00;
2715 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2717 env->cpuid_version |= value << 8;
2721 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2722 const char *name, void *opaque,
2725 X86CPU *cpu = X86_CPU(obj);
2726 CPUX86State *env = &cpu->env;
2729 value = (env->cpuid_version >> 4) & 0xf;
2730 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2731 visit_type_int(v, name, &value, errp);
2734 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2735 const char *name, void *opaque,
2738 X86CPU *cpu = X86_CPU(obj);
2739 CPUX86State *env = &cpu->env;
2740 const int64_t min = 0;
2741 const int64_t max = 0xff;
2742 Error *local_err = NULL;
2745 visit_type_int(v, name, &value, &local_err);
2747 error_propagate(errp, local_err);
2750 if (value < min || value > max) {
2751 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2752 name ? name : "null", value, min, max);
2756 env->cpuid_version &= ~0xf00f0;
2757 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
2760 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2761 const char *name, void *opaque,
2764 X86CPU *cpu = X86_CPU(obj);
2765 CPUX86State *env = &cpu->env;
2768 value = env->cpuid_version & 0xf;
2769 visit_type_int(v, name, &value, errp);
2772 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2773 const char *name, void *opaque,
2776 X86CPU *cpu = X86_CPU(obj);
2777 CPUX86State *env = &cpu->env;
2778 const int64_t min = 0;
2779 const int64_t max = 0xf;
2780 Error *local_err = NULL;
2783 visit_type_int(v, name, &value, &local_err);
2785 error_propagate(errp, local_err);
2788 if (value < min || value > max) {
2789 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2790 name ? name : "null", value, min, max);
2794 env->cpuid_version &= ~0xf;
2795 env->cpuid_version |= value & 0xf;
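/*
 * Worked example of the CPUID.01H:EAX layout used by the three setters
 * above (stepping[3:0], model[7:4], family[11:8], ext. model[19:16],
 * ext. family[27:20]): family=6, model=94 (0x5E), stepping=3 encodes as
 * (0x5 << 16) | (6 << 8) | (0xE << 4) | 3 = 0x000506E3, the well-known
 * Skylake-Client signature.
 */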
2798 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2800 X86CPU *cpu = X86_CPU(obj);
2801 CPUX86State *env = &cpu->env;
2804 value = g_malloc(CPUID_VENDOR_SZ + 1);
2805 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
2806 env->cpuid_vendor3);
2810 static void x86_cpuid_set_vendor(Object *obj, const char *value,
2813 X86CPU *cpu = X86_CPU(obj);
2814 CPUX86State *env = &cpu->env;
2817 if (strlen(value) != CPUID_VENDOR_SZ) {
2818 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
2822 env->cpuid_vendor1 = 0;
2823 env->cpuid_vendor2 = 0;
2824 env->cpuid_vendor3 = 0;
2825 for (i = 0; i < 4; i++) {
2826 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
2827 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
2828 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
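/*
 * For illustration: "GenuineIntel" packs little-endian into the three
 * vendor words exactly as CPUID.00H returns them, i.e.
 * vendor1 (EBX) = 0x756e6547 ("Genu"), vendor2 (EDX) = 0x49656e69 ("ineI"),
 * vendor3 (ECX) = 0x6c65746e ("ntel").
 */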
2832 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
2834 X86CPU *cpu = X86_CPU(obj);
2835 CPUX86State *env = &cpu->env;
2839 value = g_malloc(48 + 1);
2840 for (i = 0; i < 48; i++) {
2841 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
2847 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
2850 X86CPU *cpu = X86_CPU(obj);
2851 CPUX86State *env = &cpu->env;
2854 if (model_id == NULL) {
2857 len = strlen(model_id);
2858 memset(env->cpuid_model, 0, 48);
2859 for (i = 0; i < 48; i++) {
2863 c = (uint8_t)model_id[i];
2865 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
2869 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
2870 void *opaque, Error **errp)
2872 X86CPU *cpu = X86_CPU(obj);
2875 value = cpu->env.tsc_khz * 1000;
2876 visit_type_int(v, name, &value, errp);
2879 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
2880 void *opaque, Error **errp)
2882 X86CPU *cpu = X86_CPU(obj);
2883 const int64_t min = 0;
2884 const int64_t max = INT64_MAX;
2885 Error *local_err = NULL;
2888 visit_type_int(v, name, &value, &local_err);
2890 error_propagate(errp, local_err);
2893 if (value < min || value > max) {
2894 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2895 name ? name : "null", value, min, max);
2899 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
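/*
 * Example (sketch): "-cpu host,tsc-frequency=2500000000" reaches this
 * setter with value = 2500000000 and stores tsc_khz = 2500000, i.e. a
 * 2.5 GHz guest TSC.
 */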
2902 /* Generic getter for "feature-words" and "filtered-features" properties */
2903 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
2904 const char *name, void *opaque,
2907 uint32_t *array = (uint32_t *)opaque;
2909 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
2910 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
2911 X86CPUFeatureWordInfoList *list = NULL;
2913 for (w = 0; w < FEATURE_WORDS; w++) {
2914 FeatureWordInfo *wi = &feature_word_info[w];
2915 X86CPUFeatureWordInfo *qwi = &word_infos[w];
2916 qwi->cpuid_input_eax = wi->cpuid_eax;
2917 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
2918 qwi->cpuid_input_ecx = wi->cpuid_ecx;
2919 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
2920 qwi->features = array[w];
2922 /* List will be in reverse order, but order shouldn't matter */
2923 list_entries[w].next = list;
2924 list_entries[w].value = &word_infos[w];
2925 list = &list_entries[w];
2928 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
2931 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2932 void *opaque, Error **errp)
2934 X86CPU *cpu = X86_CPU(obj);
2935 int64_t value = cpu->hyperv_spinlock_attempts;
2937 visit_type_int(v, name, &value, errp);
2940 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2941 void *opaque, Error **errp)
2943 const int64_t min = 0xFFF;
2944 const int64_t max = UINT_MAX;
2945 X86CPU *cpu = X86_CPU(obj);
2949 visit_type_int(v, name, &value, &err);
2951 error_propagate(errp, err);
2955 if (value < min || value > max) {
2956 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2957 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2958 object_get_typename(obj), name ? name : "null",
2962 cpu->hyperv_spinlock_attempts = value;
2965 static const PropertyInfo qdev_prop_spinlocks = {
2967 .get = x86_get_hv_spinlocks,
2968 .set = x86_set_hv_spinlocks,
2971 /* Convert all '_' in a feature string option name to '-', to make feature
2972 * name conform to QOM property naming rule, which uses '-' instead of '_'.
2974 static inline void feat2prop(char *s)
2976 while ((s = strchr(s, '_'))) {
2981 /* Return the feature property name for a feature flag bit */
2982 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
2984 /* XSAVE components are automatically enabled by other features,
2985 * so return the original feature name instead
2987 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
2988 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
2990 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
2991 x86_ext_save_areas[comp].bits) {
2992 w = x86_ext_save_areas[comp].feature;
2993 bitnr = ctz32(x86_ext_save_areas[comp].bits);
2998 assert(w < FEATURE_WORDS);
2999 return feature_word_info[w].feat_names[bitnr];
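/*
 * Example of the XSAVE-component remapping above (sketch): bit 2 of
 * FEAT_XSAVE_COMP_LO is the AVX state component, whose ExtSaveArea entry
 * points back at CPUID_EXT_AVX in FEAT_1_ECX, so the name reported to the
 * user is "avx" rather than an anonymous XSAVE component bit.
 */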
3002 /* Compatibility hack to maintain legacy +-feat semantics,
3003 * where +-feat overwrites any feature set by
3004  * feat=on|off even if the latter is parsed after +-feat
3005 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
3007 static GList *plus_features, *minus_features;
3009 static gint compare_string(gconstpointer a, gconstpointer b)
3011 return g_strcmp0(a, b);
3014 /* Parse "+feature,-feature,feature=foo" CPU feature string
3016 static void x86_cpu_parse_featurestr(const char *typename, char *features,
3019 char *featurestr; /* Single "key=value" string being parsed */
3020 static bool cpu_globals_initialized;
3021 bool ambiguous = false;
3023 if (cpu_globals_initialized) {
3026 cpu_globals_initialized = true;
3032 for (featurestr = strtok(features, ",");
3034 featurestr = strtok(NULL, ",")) {
3036 const char *val = NULL;
3039 GlobalProperty *prop;
3041 /* Compatibility syntax: */
3042 if (featurestr[0] == '+') {
3043 plus_features = g_list_append(plus_features,
3044 g_strdup(featurestr + 1));
3046 } else if (featurestr[0] == '-') {
3047 minus_features = g_list_append(minus_features,
3048 g_strdup(featurestr + 1));
3052 eq = strchr(featurestr, '=');
3060 feat2prop(featurestr);
3063 if (g_list_find_custom(plus_features, name, compare_string)) {
3064 warn_report("Ambiguous CPU model string. "
3065 "Don't mix both \"+%s\" and \"%s=%s\"",
3069 if (g_list_find_custom(minus_features, name, compare_string)) {
3070 warn_report("Ambiguous CPU model string. "
3071 "Don't mix both \"-%s\" and \"%s=%s\"",
3077 if (!strcmp(name, "tsc-freq")) {
3081 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
3082 if (ret < 0 || tsc_freq > INT64_MAX) {
3083 error_setg(errp, "bad numerical value %s", val);
3086 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
3088 name = "tsc-frequency";
3091 prop = g_new0(typeof(*prop), 1);
3092 prop->driver = typename;
3093 prop->property = g_strdup(name);
3094 prop->value = g_strdup(val);
3095 prop->errp = &error_fatal;
3096 qdev_prop_register_global(prop);
3100 warn_report("Compatibility of ambiguous CPU model "
3101 "strings won't be kept on future QEMU versions");
3105 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3106 static int x86_cpu_filter_features(X86CPU *cpu);
3108 /* Check for missing features that may prevent the CPU class from
3109 * running using the current machine and accelerator.
3111 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3112 strList **missing_feats)
3117 strList **next = missing_feats;
3119 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3120 strList *new = g_new0(strList, 1);
3121 new->value = g_strdup("kvm");
3122 *missing_feats = new;
3126 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3128 x86_cpu_expand_features(xc, &err);
3130 /* Errors at x86_cpu_expand_features should never happen,
3131 * but in case they do, just report the model as not
3132 * runnable at all using the "type" property.
3134 strList *new = g_new0(strList, 1);
3135 new->value = g_strdup("type");
3140 x86_cpu_filter_features(xc);
3142 for (w = 0; w < FEATURE_WORDS; w++) {
3143 uint32_t filtered = xc->filtered_features[w];
3145 for (i = 0; i < 32; i++) {
3146 if (filtered & (1UL << i)) {
3147 strList *new = g_new0(strList, 1);
3148 new->value = g_strdup(x86_cpu_feature_name(w, i));
3155 object_unref(OBJECT(xc));
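/*
 * Illustration (sketch): on a host without TSX, query-cpu-definitions lists
 * Skylake-Client with unavailable-features containing "hle" and "rtm",
 * which is exactly the filtered_features walk performed above.
 */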
3158 /* Print all cpuid feature names in featureset
3160 static void listflags(FILE *f, fprintf_function print, const char **featureset)
3165 for (bit = 0; bit < 32; bit++) {
3166 if (featureset[bit]) {
3167 print(f, "%s%s", first ? "" : " ", featureset[bit]);
3173 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3174 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3176 ObjectClass *class_a = (ObjectClass *)a;
3177 ObjectClass *class_b = (ObjectClass *)b;
3178 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3179 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3180 const char *name_a, *name_b;
3182 if (cc_a->ordering != cc_b->ordering) {
3183 return cc_a->ordering - cc_b->ordering;
3185 name_a = object_class_get_name(class_a);
3186 name_b = object_class_get_name(class_b);
3187 return strcmp(name_a, name_b);
3191 static GSList *get_sorted_cpu_model_list(void)
3193 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3194 list = g_slist_sort(list, x86_cpu_list_compare);
3198 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3200 ObjectClass *oc = data;
3201 X86CPUClass *cc = X86_CPU_CLASS(oc);
3202 CPUListState *s = user_data;
3203 char *name = x86_cpu_class_get_model_name(cc);
3204 const char *desc = cc->model_description;
3205 if (!desc && cc->cpu_def) {
3206 desc = cc->cpu_def->model_id;
3209 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
3214 /* list available CPU models and flags */
3215 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3220 .cpu_fprintf = cpu_fprintf,
3224 (*cpu_fprintf)(f, "Available CPUs:\n");
3225 list = get_sorted_cpu_model_list();
3226 g_slist_foreach(list, x86_cpu_list_entry, &s);
3229 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
3230 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3231 FeatureWordInfo *fw = &feature_word_info[i];
3233 (*cpu_fprintf)(f, " ");
3234 listflags(f, cpu_fprintf, fw->feat_names);
3235 (*cpu_fprintf)(f, "\n");
3239 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3241 ObjectClass *oc = data;
3242 X86CPUClass *cc = X86_CPU_CLASS(oc);
3243 CpuDefinitionInfoList **cpu_list = user_data;
3244 CpuDefinitionInfoList *entry;
3245 CpuDefinitionInfo *info;
3247 info = g_malloc0(sizeof(*info));
3248 info->name = x86_cpu_class_get_model_name(cc);
3249 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3250 info->has_unavailable_features = true;
3251 info->q_typename = g_strdup(object_class_get_name(oc));
3252 info->migration_safe = cc->migration_safe;
3253 info->has_migration_safe = true;
3254 info->q_static = cc->static_model;
3256 entry = g_malloc0(sizeof(*entry));
3257 entry->value = info;
3258 entry->next = *cpu_list;
3262 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
3264 CpuDefinitionInfoList *cpu_list = NULL;
3265 GSList *list = get_sorted_cpu_model_list();
3266 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3271 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3272 bool migratable_only)
3274 FeatureWordInfo *wi = &feature_word_info[w];
3277 if (kvm_enabled()) {
3278 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
3281 } else if (hvf_enabled()) {
3282 r = hvf_get_supported_cpuid(wi->cpuid_eax,
3285 } else if (tcg_enabled()) {
3286 r = wi->tcg_features;
3290 if (migratable_only) {
3291 r &= x86_cpu_get_migratable_flags(w);
3296 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3300 for (w = 0; w < FEATURE_WORDS; w++) {
3301 report_unavailable_features(w, cpu->filtered_features[w]);
3305 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3308 for (pv = props; pv->prop; pv++) {
3312 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3317 /* Load data from X86CPUDefinition into a X86CPU object
3319 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
3321 CPUX86State *env = &cpu->env;
3323 char host_vendor[CPUID_VENDOR_SZ + 1];
3326 /* NOTE: any property set by this function should be returned by
3327 * x86_cpu_static_props(), so static expansion of
3328 * query-cpu-model-expansion is always complete.
3331 /* CPU models only set _minimum_ values for level/xlevel: */
3332 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
3333 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
3335 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
3336 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
3337 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
3338 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
3339 for (w = 0; w < FEATURE_WORDS; w++) {
3340 env->features[w] = def->features[w];
3343 /* legacy-cache defaults to 'off' if CPU model provides cache info */
3344 cpu->legacy_cache = !def->cache_info;
3346 /* Special cases not set in the X86CPUDefinition structs: */
3347 /* TODO: in-kernel irqchip for hvf */
3348 if (kvm_enabled()) {
3349 if (!kvm_irqchip_in_kernel()) {
3350 x86_cpu_change_kvm_default("x2apic", "off");
3353 x86_cpu_apply_props(cpu, kvm_default_props);
3354 } else if (tcg_enabled()) {
3355 x86_cpu_apply_props(cpu, tcg_default_props);
3358 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
3360 /* sysenter isn't supported in compatibility mode on AMD,
3361 * syscall isn't supported in compatibility mode on Intel.
3362 * Normally we advertise the actual CPU vendor, but you can
3363 * override this using the 'vendor' property if you want to use
3364 * KVM's sysenter/syscall emulation in compatibility mode and
3365 * when doing cross vendor migration
3367 vendor = def->vendor;
3368 if (accel_uses_host_cpuid()) {
3369 uint32_t ebx = 0, ecx = 0, edx = 0;
3370 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
3371 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
3372 vendor = host_vendor;
3375 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
3379 /* Return a QDict containing keys for all properties that can be included
3380 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3381 * must be included in the dictionary.
3383 static QDict *x86_cpu_static_props(void)
3387 static const char *props[] = {
3405 for (i = 0; props[i]; i++) {
3406 qdict_put_null(d, props[i]);
3409 for (w = 0; w < FEATURE_WORDS; w++) {
3410 FeatureWordInfo *fi = &feature_word_info[w];
3412 for (bit = 0; bit < 32; bit++) {
3413 if (!fi->feat_names[bit]) {
3416 qdict_put_null(d, fi->feat_names[bit]);
3423 /* Add an entry to the @props dict with the value of the given property. */
3424 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3426 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3429 qdict_put_obj(props, prop, value);
3432 /* Convert CPU model data from X86CPU object to a property dictionary
3433 * that can recreate exactly the same CPU model.
3435 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3437 QDict *sprops = x86_cpu_static_props();
3438 const QDictEntry *e;
3440 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3441 const char *prop = qdict_entry_key(e);
3442 x86_cpu_expand_prop(cpu, props, prop);
3446 /* Convert CPU model data from X86CPU object to a property dictionary
3447 * that can recreate exactly the same CPU model, including every
3448 * writeable QOM property.
3450 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3452 ObjectPropertyIterator iter;
3453 ObjectProperty *prop;
3455 object_property_iter_init(&iter, OBJECT(cpu));
3456 while ((prop = object_property_iter_next(&iter))) {
3457 /* skip read-only or write-only properties */
3458 if (!prop->get || !prop->set) {
3462 /* "hotplugged" is the only property that is configurable
3463 * on the command-line but will be set differently on CPUs
3464 * created using "-cpu ... -smp ..." and by CPUs created
3465 * on the fly by x86_cpu_from_model() for querying. Skip it.
3467 if (!strcmp(prop->name, "hotplugged")) {
3470 x86_cpu_expand_prop(cpu, props, prop->name);
3474 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3476 const QDictEntry *prop;
3479 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3480 object_property_set_qobject(obj, qdict_entry_value(prop),
3481 qdict_entry_key(prop), &err);
3487 error_propagate(errp, err);
3490 /* Create X86CPU object according to model+props specification */
3491 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3497 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3499 error_setg(&err, "CPU model '%s' not found", model);
3503 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3505 object_apply_props(OBJECT(xc), props, &err);
3511 x86_cpu_expand_features(xc, &err);
3518 error_propagate(errp, err);
3519 object_unref(OBJECT(xc));
3525 CpuModelExpansionInfo *
3526 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3527 CpuModelInfo *model,
3532 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3533 QDict *props = NULL;
3534 const char *base_name;
3536 xc = x86_cpu_from_model(model->name,
3538 qobject_to(QDict, model->props) :
3544 props = qdict_new();
3547 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3548 /* Static expansion will be based on "base" only */
3550 x86_cpu_to_dict(xc, props);
3552 case CPU_MODEL_EXPANSION_TYPE_FULL:
3553 /* As we don't return every single property, full expansion needs
3554 * to keep the original model name+props, and add extra
3555 * properties on top of that.
3557 base_name = model->name;
3558 x86_cpu_to_dict_full(xc, props);
3561 error_setg(&err, "Unsupportted expansion type");
3566 props = qdict_new();
3568 x86_cpu_to_dict(xc, props);
3570 ret->model = g_new0(CpuModelInfo, 1);
3571 ret->model->name = g_strdup(base_name);
3572 ret->model->props = QOBJECT(props);
3573 ret->model->has_props = true;
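/*
 * QMP usage sketch (illustrative request, not emitted by this file):
 *   { "execute": "query-cpu-model-expansion",
 *     "arguments": { "type": "static",
 *                    "model": { "name": "Skylake-Client" } } }
 * returns an expansion based on the static "base" model plus every property
 * needed to recreate Skylake-Client on this host, which is what the code
 * above assembles.
 */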
3576 object_unref(OBJECT(xc));
3578 error_propagate(errp, err);
3579 qapi_free_CpuModelExpansionInfo(ret);
3585 static gchar *x86_gdb_arch_name(CPUState *cs)
3587 #ifdef TARGET_X86_64
3588 return g_strdup("i386:x86-64");
3590 return g_strdup("i386");
3594 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3596 X86CPUDefinition *cpudef = data;
3597 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3599 xcc->cpu_def = cpudef;
3600 xcc->migration_safe = true;
3603 static void x86_register_cpudef_type(X86CPUDefinition *def)
3605 char *typename = x86_cpu_type_name(def->name);
3608 .parent = TYPE_X86_CPU,
3609 .class_init = x86_cpu_cpudef_class_init,
3613 /* AMD aliases are handled at runtime based on CPUID vendor, so
3614 * they shouldn't be set on the CPU model table.
3616 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3617 /* catch mistakes instead of silently truncating model_id when too long */
3618 assert(def->model_id && strlen(def->model_id) <= 48);
3625 #if !defined(CONFIG_USER_ONLY)
3627 void cpu_clear_apic_feature(CPUX86State *env)
3629 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3632 #endif /* !CONFIG_USER_ONLY */
3634 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3635 uint32_t *eax, uint32_t *ebx,
3636 uint32_t *ecx, uint32_t *edx)
3638 X86CPU *cpu = x86_env_get_cpu(env);
3639 CPUState *cs = CPU(cpu);
3640 uint32_t pkg_offset;
3642 uint32_t signature[3];
3644 /* Calculate & apply limits for different index ranges */
3645 if (index >= 0xC0000000) {
3646 limit = env->cpuid_xlevel2;
3647 } else if (index >= 0x80000000) {
3648 limit = env->cpuid_xlevel;
3649 } else if (index >= 0x40000000) {
3652 limit = env->cpuid_level;
3655 if (index > limit) {
3656 /* Intel documentation states that invalid EAX input will
3657 * return the same information as EAX=cpuid_level
3658 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3660 index = env->cpuid_level;
3665 *eax = env->cpuid_level;
3666 *ebx = env->cpuid_vendor1;
3667 *edx = env->cpuid_vendor2;
3668 *ecx = env->cpuid_vendor3;
3671 *eax = env->cpuid_version;
3672 *ebx = (cpu->apic_id << 24) |
3673 8 << 8; /* CLFLUSH size in quad words (8 * 8 = 64 bytes); Linux wants it. */
3674 *ecx = env->features[FEAT_1_ECX];
3675 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3676 *ecx |= CPUID_EXT_OSXSAVE;
3678 *edx = env->features[FEAT_1_EDX];
3679 if (cs->nr_cores * cs->nr_threads > 1) {
3680 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3685 /* cache info: needed for Pentium Pro compatibility */
3686 if (cpu->cache_info_passthrough) {
3687 host_cpuid(index, 0, eax, ebx, ecx, edx);
3690 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3692 if (!cpu->enable_l3_cache) {
3695 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
3697 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
3698 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
3699 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
3702 /* cache info: needed for Core compatibility */
3703 if (cpu->cache_info_passthrough) {
3704 host_cpuid(index, count, eax, ebx, ecx, edx);
3705 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3706 *eax &= ~0xFC000000;
3707 if ((*eax & 31) && cs->nr_cores > 1) {
3708 *eax |= (cs->nr_cores - 1) << 26;
3713 case 0: /* L1 dcache info */
3714 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
3716 eax, ebx, ecx, edx);
3718 case 1: /* L1 icache info */
3719 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
3721 eax, ebx, ecx, edx);
3723 case 2: /* L2 cache info */
3724 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
3725 cs->nr_threads, cs->nr_cores,
3726 eax, ebx, ecx, edx);
3728 case 3: /* L3 cache info */
3729 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3730 if (cpu->enable_l3_cache) {
3731 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
3732 (1 << pkg_offset), cs->nr_cores,
3733 eax, ebx, ecx, edx);
3737 default: /* end of info */
3738 *eax = *ebx = *ecx = *edx = 0;
3744 /* mwait info: needed for Core compatibility */
3745 *eax = 0; /* Smallest monitor-line size in bytes */
3746 *ebx = 0; /* Largest monitor-line size in bytes */
3747 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
3751 /* Thermal and Power Leaf */
3752 *eax = env->features[FEAT_6_EAX];
3758 /* Structured Extended Feature Flags Enumeration Leaf */
3760 *eax = 0; /* Maximum ECX value for sub-leaves */
3761 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3762 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
3763 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3764 *ecx |= CPUID_7_0_ECX_OSPKE;
3766 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3775 /* Direct Cache Access Information Leaf */
3776 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3782 /* Architectural Performance Monitoring Leaf */
3783 if (kvm_enabled() && cpu->enable_pmu) {
3784 KVMState *s = cs->kvm_state;
3786 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
3787 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
3788 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
3789 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
3790 } else if (hvf_enabled() && cpu->enable_pmu) {
3791 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
3792 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
3793 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
3794 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
3803 /* Extended Topology Enumeration Leaf */
3804 if (!cpu->enable_cpuid_0xb) {
3805 *eax = *ebx = *ecx = *edx = 0;
3809 *ecx = count & 0xff;
3810 *edx = cpu->apic_id;
3814 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
3815 *ebx = cs->nr_threads;
3816 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
3819 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3820 *ebx = cs->nr_cores * cs->nr_threads;
3821 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
3826 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
3829 assert(!(*eax & ~0x1f));
3830 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
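/*
 * Worked example for this leaf (sketch): with nr_cores=2 and nr_threads=2,
 * apicid_core_offset() is 1 and apicid_pkg_offset() is 2, so sub-leaf 0
 * (SMT) reports EAX=1/EBX=2 and sub-leaf 1 (core) reports EAX=2/EBX=4,
 * i.e. the shift counts for deriving topology IDs from the x2APIC ID
 * returned in EDX.
 */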
3833 /* Processor Extended State */
3838 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3843 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
3844 *eax = env->features[FEAT_XSAVE_COMP_LO];
3845 *edx = env->features[FEAT_XSAVE_COMP_HI];
3847 } else if (count == 1) {
3848 *eax = env->features[FEAT_XSAVE];
3849 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
3850 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
3851 const ExtSaveArea *esa = &x86_ext_save_areas[count];
3859 /* Intel Processor Trace Enumeration */
3864 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
3870 *eax = INTEL_PT_MAX_SUBLEAF;
3871 *ebx = INTEL_PT_MINIMAL_EBX;
3872 *ecx = INTEL_PT_MINIMAL_ECX;
3873 } else if (count == 1) {
3874 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
3875 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
3881 * CPUID code in kvm_arch_init_vcpu() ignores stuff
3882 * set here, but we restrict to TCG nonetheless.
3884 if (tcg_enabled() && cpu->expose_tcg) {
3885 memcpy(signature, "TCGTCGTCGTCG", 12);
3887 *ebx = signature[0];
3888 *ecx = signature[1];
3889 *edx = signature[2];
3904 *eax = env->cpuid_xlevel;
3905 *ebx = env->cpuid_vendor1;
3906 *edx = env->cpuid_vendor2;
3907 *ecx = env->cpuid_vendor3;
3910 *eax = env->cpuid_version;
3912 *ecx = env->features[FEAT_8000_0001_ECX];
3913 *edx = env->features[FEAT_8000_0001_EDX];
3915 /* The Linux kernel checks for the CMPLegacy bit and
3916 * discards multiple thread information if it is set.
3917 * So don't set it here for Intel to make Linux guests happy.
3919 if (cs->nr_cores * cs->nr_threads > 1) {
3920 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
3921 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
3922 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
3923 *ecx |= 1 << 1; /* CmpLegacy bit */
3930 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
3931 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
3932 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
3933 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
3936 /* cache info (L1 cache) */
3937 if (cpu->cache_info_passthrough) {
3938 host_cpuid(index, 0, eax, ebx, ecx, edx);
3941 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
3942 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
3943 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
3944 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
3945 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
3946 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
3949 /* cache info (L2 cache) */
3950 if (cpu->cache_info_passthrough) {
3951 host_cpuid(index, 0, eax, ebx, ecx, edx);
3954 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
3955 (L2_DTLB_2M_ENTRIES << 16) | \
3956 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
3957 (L2_ITLB_2M_ENTRIES);
3958 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
3959 (L2_DTLB_4K_ENTRIES << 16) | \
3960 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
3961 (L2_ITLB_4K_ENTRIES);
3962 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
3963 cpu->enable_l3_cache ?
3964 env->cache_info_amd.l3_cache : NULL,
3971 *edx = env->features[FEAT_8000_0007_EDX];
3974 /* virtual & phys address size in low 2 bytes. */
3975 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3976 /* 64 bit processor */
3977 *eax = cpu->phys_bits; /* configurable physical bits */
3978 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
3979 *eax |= 0x00003900; /* 57 bits virtual */
3981 *eax |= 0x00003000; /* 48 bits virtual */
3984 *eax = cpu->phys_bits;
3986 *ebx = env->features[FEAT_8000_0008_EBX];
3989 if (cs->nr_cores * cs->nr_threads > 1) {
3990 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
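/*
 * Illustration (sketch): a guest with phys-bits=46, LA57 disabled and
 * 8 vCPUs per package reports EAX = 0x0000302E (48-bit virtual, 46-bit
 * physical) and ECX[7:0] = 7 here.
 */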
3994 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3995 *eax = 0x00000001; /* SVM Revision */
3996 *ebx = 0x00000010; /* nr of ASIDs */
3998 *edx = env->features[FEAT_SVM]; /* optional features */
4007 *eax = env->cpuid_xlevel2;
4013 /* Support for VIA CPU's CPUID instruction */
4014 *eax = env->cpuid_version;
4017 *edx = env->features[FEAT_C000_0001_EDX];
4022 /* Reserved for the future, and now filled with zero */
4029 *eax = sev_enabled() ? 0x2 : 0;
4030 *ebx = sev_get_cbit_position();
4031 *ebx |= sev_get_reduced_phys_bits() << 6;
4036 /* reserved values: zero */
4045 /* CPUClass::reset() */
4046 static void x86_cpu_reset(CPUState *s)
4048 X86CPU *cpu = X86_CPU(s);
4049 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
4050 CPUX86State *env = &cpu->env;
4055 xcc->parent_reset(s);
4057 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
4059 env->old_exception = -1;
4061 /* init to reset state */
4063 env->hflags2 |= HF2_GIF_MASK;
4065 cpu_x86_update_cr0(env, 0x60000010);
4066 env->a20_mask = ~0x0;
4067 env->smbase = 0x30000;
4068 env->msr_smi_count = 0;
4070 env->idt.limit = 0xffff;
4071 env->gdt.limit = 0xffff;
4072 env->ldt.limit = 0xffff;
4073 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
4074 env->tr.limit = 0xffff;
4075 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
4077 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
4078 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
4079 DESC_R_MASK | DESC_A_MASK);
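/*
 * Note (illustrative): with CS.base = 0xffff0000 and the reset EIP of
 * 0xfff0, the first instruction after reset is fetched from 0xfffffff0,
 * the architectural x86 reset vector.
 */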
4080 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
4081 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4083 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
4084 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4086 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
4087 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4089 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
4090 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4092 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
4093 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4097 env->regs[R_EDX] = env->cpuid_version;
4102 for (i = 0; i < 8; i++) {
4105 cpu_set_fpuc(env, 0x37f);
4107 env->mxcsr = 0x1f80;
4108 /* All units are in INIT state. */
4111 env->pat = 0x0007040600070406ULL;
4112 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
4114 memset(env->dr, 0, sizeof(env->dr));
4115 env->dr[6] = DR6_FIXED_1;
4116 env->dr[7] = DR7_FIXED_1;
4117 cpu_breakpoint_remove_all(s, BP_CPU);
4118 cpu_watchpoint_remove_all(s, BP_CPU);
4121 xcr0 = XSTATE_FP_MASK;
4123 #ifdef CONFIG_USER_ONLY
4124 /* Enable all the features for user-mode. */
4125 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4126 xcr0 |= XSTATE_SSE_MASK;
4128 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4129 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4130 if (env->features[esa->feature] & esa->bits) {
4135 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
4136 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
4138 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
4139 cr4 |= CR4_FSGSBASE_MASK;
4144 cpu_x86_update_cr4(env, cr4);
4147 * SDM 11.11.5 requires:
4148 * - IA32_MTRR_DEF_TYPE MSR.E = 0
4149 * - IA32_MTRR_PHYSMASKn.V = 0
4150 * All other bits are undefined. For simplification, zero it all.
4152 env->mtrr_deftype = 0;
4153 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
4154 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
4156 env->interrupt_injected = -1;
4157 env->exception_injected = -1;
4158 env->nmi_injected = false;
4159 #if !defined(CONFIG_USER_ONLY)
4160 /* We hard-wire the BSP to the first CPU. */
4161 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
4163 s->halted = !cpu_is_bsp(cpu);
4165 if (kvm_enabled()) {
4166 kvm_arch_reset_vcpu(cpu);
4168 else if (hvf_enabled()) {
4174 #ifndef CONFIG_USER_ONLY
4175 bool cpu_is_bsp(X86CPU *cpu)
4177 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4180 /* TODO: remove me when reset over QOM tree is implemented */
4181 static void x86_cpu_machine_reset_cb(void *opaque)
4183 X86CPU *cpu = opaque;
4184 cpu_reset(CPU(cpu));
4188 static void mce_init(X86CPU *cpu)
4190 CPUX86State *cenv = &cpu->env;
4193 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4194 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4195 (CPUID_MCE | CPUID_MCA)) {
4196 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4197 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4198 cenv->mcg_ctl = ~(uint64_t)0;
4199 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4200 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4205 #ifndef CONFIG_USER_ONLY
4206 APICCommonClass *apic_get_class(void)
4208 const char *apic_type = "apic";
4210 /* TODO: in-kernel irqchip for hvf */
4211 if (kvm_apic_in_kernel()) {
4212 apic_type = "kvm-apic";
4213 } else if (xen_enabled()) {
4214 apic_type = "xen-apic";
4217 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4220 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
4222 APICCommonState *apic;
4223 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
4225 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
4227 object_property_add_child(OBJECT(cpu), "lapic",
4228 OBJECT(cpu->apic_state), &error_abort);
4229 object_unref(OBJECT(cpu->apic_state));
4231 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
4232 /* TODO: convert to link<> */
4233 apic = APIC_COMMON(cpu->apic_state);
4235 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
4238 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4240 APICCommonState *apic;
4241 static bool apic_mmio_map_once;
4243 if (cpu->apic_state == NULL) {
4246 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
4249 /* Map APIC MMIO area */
4250 apic = APIC_COMMON(cpu->apic_state);
4251 if (!apic_mmio_map_once) {
4252 memory_region_add_subregion_overlap(get_system_memory(),
4254 MSR_IA32_APICBASE_BASE,
4257 apic_mmio_map_once = true;
4261 static void x86_cpu_machine_done(Notifier *n, void *unused)
4263 X86CPU *cpu = container_of(n, X86CPU, machine_done);
4264 MemoryRegion *smram =
4265 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4268 cpu->smram = g_new(MemoryRegion, 1);
4269 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4270 smram, 0, 1ull << 32);
4271 memory_region_set_enabled(cpu->smram, true);
4272 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
4276 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4281 /* Note: Only safe for use on x86(-64) hosts */
4282 static uint32_t x86_host_phys_bits(void)
4285 uint32_t host_phys_bits;
4287 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4288 if (eax >= 0x80000008) {
4289 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4290 /* Note: According to AMD doc 25481 rev 2.34, there is a field
4291 * at bits 23:16 that can specify the maximum physical address bits
4292 * for the guest, overriding this value; but I've not seen
4293 * anything with that set.
4295 host_phys_bits = eax & 0xff;
4297 /* It's an odd 64 bit machine that doesn't have the leaf for
4298 * physical address bits; fall back to 36, typical of older Intel parts.
4301 host_phys_bits = 36;
4304 return host_phys_bits;
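
/*
 * Illustrative sketch only (not part of QEMU, kept disabled): next to the
 * physical width in bits 7:0, CPUID[0x80000008].EAX also reports the linear
 * (virtual) address width in bits 15:8, so e.g. EAX = 0x3028 means
 * 40 physical / 48 virtual bits.  example_host_addr_bits is a hypothetical
 * helper name.
 */
#if 0
static void example_host_addr_bits(uint32_t *phys, uint32_t *virt)
{
    uint32_t eax;

    host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
    *phys = eax & 0xff;         /* physical address bits */
    *virt = (eax >> 8) & 0xff;  /* linear (virtual) address bits */
}
#endif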
4307 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4314 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4315 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4317 CPUX86State *env = &cpu->env;
4318 FeatureWordInfo *fi = &feature_word_info[w];
4319 uint32_t eax = fi->cpuid_eax;
4320 uint32_t region = eax & 0xF0000000;
4322 if (!env->features[w]) {
4328 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4331 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4334 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
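
/*
 * Worked example (illustrative): FEAT_8000_0008_EBX has cpuid_eax ==
 * 0x80000008, so its region above is 0x80000000, and enabling any bit in
 * that word raises cpuid_min_xlevel to at least 0x80000008.
 */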
4339 /* Calculate XSAVE components based on the configured CPU feature flags */
4340 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4342 CPUX86State *env = &cpu->env;
4346 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4351 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4352 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4353 if (env->features[esa->feature] & esa->bits) {
4354 mask |= (1ULL << i);
4358 env->features[FEAT_XSAVE_COMP_LO] = mask;
4359 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
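
/*
 * Illustrative only (hypothetical helper, kept disabled): the two feature
 * words written above are simply the low and high halves of the 64-bit
 * component mask, i.e. what CPUID[EAX=0xD,ECX=0] reports in EAX and EDX.
 */
#if 0
static uint64_t example_xsave_component_mask(CPUX86State *env)
{
    return ((uint64_t)env->features[FEAT_XSAVE_COMP_HI] << 32) |
           env->features[FEAT_XSAVE_COMP_LO];
}
#endif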
4362 /***** Steps involved in loading and filtering CPUID data
4364 * When initializing and realizing a CPU object, the steps
4365 * involved in setting up CPUID data are:
4367 * 1) Loading CPU model definition (X86CPUDefinition). This is
4368 * implemented by x86_cpu_load_def() and should be completely
4369 * transparent, as it is done automatically by instance_init.
4370 * No code should need to look at X86CPUDefinition structs
4371 * outside instance_init.
4373 * 2) CPU expansion. This is done by realize before CPUID
4374 * filtering, and will make sure host/accelerator data is
4375 * loaded for CPU models that depend on host capabilities
4376 * (e.g. "host"). Done by x86_cpu_expand_features().
4378 * 3) CPUID filtering. This initializes extra data related to
4379 * CPUID, and checks if the host supports all capabilities
4380 * required by the CPU. Runnability of a CPU model is
4381 * determined at this step. Done by x86_cpu_filter_features().
4383 * Some operations don't require all steps to be performed.
4386 * - CPU instance creation (instance_init) will run only CPU
4387 * model loading. CPU expansion can't run at instance_init-time
4388 * because host/accelerator data may not be available yet.
4389 * - CPU realization will perform both CPU model expansion and CPUID
4390 * filtering, and return an error in case one of them fails.
4391 * - query-cpu-definitions needs to run all 3 steps. It needs
4392 * to run CPUID filtering, as the 'unavailable-features'
4393 * field is set based on the filtering results.
4394 * - The query-cpu-model-expansion QMP command only needs to run
4395 * CPU model loading and CPU expansion. It should not filter
4396 * any CPUID data based on host capabilities.
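
/*
 * For illustration only (hypothetical driver, not how QEMU actually invokes
 * these): the three steps above roughly map onto this call sequence for a
 * CPU object that was just created by instance_init.
 */
#if 0
static void example_cpuid_setup(X86CPU *cpu, Error **errp)
{
    /* 1) model loading already ran in instance_init via x86_cpu_load_def() */

    /* 2) expansion: pull in host/accelerator data where needed */
    x86_cpu_expand_features(cpu, errp);

    /* 3) filtering: non-zero return means some flags had to be dropped */
    if (x86_cpu_filter_features(cpu)) {
        x86_cpu_report_filtered_features(cpu);
    }
}
#endif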
4399 /* Expand CPU configuration data, based on configured features
4400 * and host/accelerator capabilities when appropriate.
4402 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4404 CPUX86State *env = &cpu->env;
4407 Error *local_err = NULL;
4409 /* TODO: Now that cpu->max_features doesn't overwrite features
4410 * set using QOM properties, we can convert
4411 * plus_features & minus_features to global properties
4412 * inside x86_cpu_parse_featurestr() too.
4414 if (cpu->max_features) {
4415 for (w = 0; w < FEATURE_WORDS; w++) {
4416 /* Override only features that weren't set explicitly
4420 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4421 ~env->user_features[w] & \
4422 ~feature_word_info[w].no_autoenable_flags;
4426 for (l = plus_features; l; l = l->next) {
4427 const char *prop = l->data;
4428 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4434 for (l = minus_features; l; l = l->next) {
4435 const char *prop = l->data;
4436 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4442 if (!kvm_enabled() || !cpu->expose_kvm) {
4443 env->features[FEAT_KVM] = 0;
4446 x86_cpu_enable_xsave_components(cpu);
4448 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
4449 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4450 if (cpu->full_cpuid_auto_level) {
4451 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4452 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4453 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4454 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4455 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4456 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4457 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4458 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4459 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4460 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4461 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4462 /* SVM requires CPUID[0x8000000A] */
4463 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4464 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4467 /* SEV requires CPUID[0x8000001F] */
4468 if (sev_enabled()) {
4469 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4473 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4474 if (env->cpuid_level == UINT32_MAX) {
4475 env->cpuid_level = env->cpuid_min_level;
4477 if (env->cpuid_xlevel == UINT32_MAX) {
4478 env->cpuid_xlevel = env->cpuid_min_xlevel;
4480 if (env->cpuid_xlevel2 == UINT32_MAX) {
4481 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4485 if (local_err != NULL) {
4486 error_propagate(errp, local_err);
4491 * Finishes initialization of CPUID data and filters CPU feature
4492 * words based on host availability of each feature.
4494 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4496 static int x86_cpu_filter_features(X86CPU *cpu)
4498 CPUX86State *env = &cpu->env;
4502 for (w = 0; w < FEATURE_WORDS; w++) {
4503 uint32_t host_feat =
4504 x86_cpu_get_supported_feature_word(w, false);
4505 uint32_t requested_features = env->features[w];
4506 env->features[w] &= host_feat;
4507 cpu->filtered_features[w] = requested_features & ~env->features[w];
4508 if (cpu->filtered_features[w]) {
4513 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
4515 KVMState *s = CPU(cpu)->kvm_state;
4516 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
4517 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
4518 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
4519 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
4520 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
4523 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
4524 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
4525 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
4526 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
4527 INTEL_PT_ADDR_RANGES_NUM) ||
4528 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
4529 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
4530 (ecx_0 & INTEL_PT_IP_LIP)) {
4532 * Processor Trace capabilities aren't configurable, so if the
4533 * host can't emulate the capabilities we report on
4534 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
4536 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
4537 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
4545 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4546 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4547 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4548 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4549 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4550 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
4551 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4553 CPUState *cs = CPU(dev);
4554 X86CPU *cpu = X86_CPU(dev);
4555 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4556 CPUX86State *env = &cpu->env;
4557 Error *local_err = NULL;
4558 static bool ht_warned;
4560 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4561 char *name = x86_cpu_class_get_model_name(xcc);
4562 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4567 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4568 error_setg(errp, "apic-id property was not initialized properly");
4572 x86_cpu_expand_features(cpu, &local_err);
4577 if (x86_cpu_filter_features(cpu) &&
4578 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4579 x86_cpu_report_filtered_features(cpu);
4580 if (cpu->enforce_cpuid) {
4581 error_setg(&local_err,
4582 accel_uses_host_cpuid() ?
4583 "Host doesn't support requested features" :
4584 "TCG doesn't support requested features");
4589 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4592 if (IS_AMD_CPU(env)) {
4593 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4594 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4595 & CPUID_EXT2_AMD_ALIASES);
4598 /* For 64 bit systems, decide how many physical address bits to present.
4599 * Ideally this should be the same as the host; anything other than matching
4600 * the host can cause incorrect guest behaviour.
4601 * QEMU used to pick the magic value of 40 bits, which corresponds to
4602 * consumer AMD devices but nothing else.
4604 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4605 if (accel_uses_host_cpuid()) {
4606 uint32_t host_phys_bits = x86_host_phys_bits();
4609 if (cpu->host_phys_bits) {
4610 /* The user asked for us to use the host physical bits */
4611 cpu->phys_bits = host_phys_bits;
4614 /* Print a warning if the user set it to a value that's not the
4617 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4619 warn_report("Host physical bits (%u)"
4620 " does not match phys-bits property (%u)",
4621 host_phys_bits, cpu->phys_bits);
4625 if (cpu->phys_bits &&
4626 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4627 cpu->phys_bits < 32)) {
4628 error_setg(errp, "phys-bits should be between 32 and %u "
4630 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
4634 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4635 error_setg(errp, "TCG only supports phys-bits=%u",
4636 TCG_PHYS_ADDR_BITS);
4640 /* 0 means it was not explicitly set by the user (or by machine
4641 * compat_props or by the host code above). In this case, the default
4642 * is the value used by TCG (40).
4644 if (cpu->phys_bits == 0) {
4645 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4648 /* For 32 bit systems don't use the user-set value, but keep
4649 * phys_bits consistent with what we tell the guest.
4651 if (cpu->phys_bits != 0) {
4652 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
4656 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4657 cpu->phys_bits = 36;
4659 cpu->phys_bits = 32;
4663 /* Cache information initialization */
4664 if (!cpu->legacy_cache) {
4665 if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
4666 char *name = x86_cpu_class_get_model_name(xcc);
4668 "CPU model '%s' doesn't support legacy-cache=off", name);
4672 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
4673 *xcc->cpu_def->cache_info;
4675 /* Build legacy cache information */
4676 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
4677 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
4678 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
4679 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
4681 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
4682 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
4683 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
4684 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
4686 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
4687 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
4688 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
4689 env->cache_info_amd.l3_cache = &legacy_l3_cache;
4693 cpu_exec_realizefn(cs, &local_err);
4694 if (local_err != NULL) {
4695 error_propagate(errp, local_err);
4699 #ifndef CONFIG_USER_ONLY
4700 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
4702 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4703 x86_cpu_apic_create(cpu, &local_err);
4704 if (local_err != NULL) {
4712 #ifndef CONFIG_USER_ONLY
4713 if (tcg_enabled()) {
4714 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4715 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4717 /* Outer container... */
4718 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4719 memory_region_set_enabled(cpu->cpu_as_root, true);
4721 /* ... with two regions inside: normal system memory with low
4724 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4725 get_system_memory(), 0, ~0ull);
4726 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4727 memory_region_set_enabled(cpu->cpu_as_mem, true);
4730 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4731 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4733 /* ... SMRAM with higher priority, linked from /machine/smram. */
4734 cpu->machine_done.notify = x86_cpu_machine_done;
4735 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4741 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
4742 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
4743 * based on inputs (sockets, cores, threads), it is still better to give
4746 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
4747 * cs->nr_threads hasn't been populated yet and the check is incorrect.
4749 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
4750 error_report("AMD CPU doesn't support hyperthreading. Please configure"
4751 " -smp options properly.");
4755 x86_cpu_apic_realize(cpu, &local_err);
4756 if (local_err != NULL) {
4761 xcc->parent_realize(dev, &local_err);
4764 if (local_err != NULL) {
4765 error_propagate(errp, local_err);
4770 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
4772 X86CPU *cpu = X86_CPU(dev);
4773 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4774 Error *local_err = NULL;
4776 #ifndef CONFIG_USER_ONLY
4777 cpu_remove_sync(CPU(dev));
4778 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
4781 if (cpu->apic_state) {
4782 object_unparent(OBJECT(cpu->apic_state));
4783 cpu->apic_state = NULL;
4786 xcc->parent_unrealize(dev, &local_err);
4787 if (local_err != NULL) {
4788 error_propagate(errp, local_err);
4793 typedef struct BitProperty {
4798 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
4799 void *opaque, Error **errp)
4801 X86CPU *cpu = X86_CPU(obj);
4802 BitProperty *fp = opaque;
4803 uint32_t f = cpu->env.features[fp->w];
4804 bool value = (f & fp->mask) == fp->mask;
4805 visit_type_bool(v, name, &value, errp);
4808 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
4809 void *opaque, Error **errp)
4811 DeviceState *dev = DEVICE(obj);
4812 X86CPU *cpu = X86_CPU(obj);
4813 BitProperty *fp = opaque;
4814 Error *local_err = NULL;
4817 if (dev->realized) {
4818 qdev_prop_set_after_realize(dev, name, errp);
4822 visit_type_bool(v, name, &value, &local_err);
4824 error_propagate(errp, local_err);
4829 cpu->env.features[fp->w] |= fp->mask;
4831 cpu->env.features[fp->w] &= ~fp->mask;
4833 cpu->env.user_features[fp->w] |= fp->mask;
4836 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
4839 BitProperty *prop = opaque;
4843 /* Register a boolean property to get/set a single bit in a uint32_t field.
4845 * The same property name can be registered multiple times to make it affect
4846 * multiple bits in the same FeatureWord. In that case, the getter will return
4847 * true only if all bits are set.
4849 static void x86_cpu_register_bit_prop(X86CPU *cpu,
4850 const char *prop_name,
4856 uint32_t mask = (1UL << bitnr);
4858 op = object_property_find(OBJECT(cpu), prop_name, NULL);
4864 fp = g_new0(BitProperty, 1);
4867 object_property_add(OBJECT(cpu), prop_name, "bool",
4868 x86_cpu_get_bit_prop,
4869 x86_cpu_set_bit_prop,
4870 x86_cpu_release_bit_prop, fp, &error_abort);
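
/*
 * Illustrative usage sketch (assumed example, not part of this file): once
 * registered, each feature bit behaves like any other QOM bool property, so
 * the command-line "+avx" and the call below both end up in
 * x86_cpu_set_bit_prop().
 */
#if 0
static void example_enable_feature(X86CPU *cpu, Error **errp)
{
    /* sets the bit in env->features[] and marks it in env->user_features[]
     * so that a later "max features" expansion won't override it */
    object_property_set_bool(OBJECT(cpu), true, "avx", errp);
}
#endif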
4874 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
4878 FeatureWordInfo *fi = &feature_word_info[w];
4879 const char *name = fi->feat_names[bitnr];
4885 /* Property names should use "-" instead of "_".
4886 * Old names containing underscores are registered as aliases
4887 * using object_property_add_alias()
4889 assert(!strchr(name, '_'));
4890 /* aliases don't use "|" delimiters anymore; they are registered
4891 * manually using object_property_add_alias() */
4892 assert(!strchr(name, '|'));
4893 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
4896 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
4898 X86CPU *cpu = X86_CPU(cs);
4899 CPUX86State *env = &cpu->env;
4900 GuestPanicInformation *panic_info = NULL;
4902 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
4903 panic_info = g_malloc0(sizeof(GuestPanicInformation));
4905 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
4907 assert(HV_CRASH_PARAMS >= 5);
4908 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
4909 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
4910 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
4911 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
4912 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
4917 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
4918 const char *name, void *opaque,
4921 CPUState *cs = CPU(obj);
4922 GuestPanicInformation *panic_info;
4924 if (!cs->crash_occurred) {
4925 error_setg(errp, "No crash occurred");
4929 panic_info = x86_cpu_get_crash_info(cs);
4930 if (panic_info == NULL) {
4931 error_setg(errp, "No crash information");
4935 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
4937 qapi_free_GuestPanicInformation(panic_info);
4940 static void x86_cpu_initfn(Object *obj)
4942 CPUState *cs = CPU(obj);
4943 X86CPU *cpu = X86_CPU(obj);
4944 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
4945 CPUX86State *env = &cpu->env;
4950 object_property_add(obj, "family", "int",
4951 x86_cpuid_version_get_family,
4952 x86_cpuid_version_set_family, NULL, NULL, NULL);
4953 object_property_add(obj, "model", "int",
4954 x86_cpuid_version_get_model,
4955 x86_cpuid_version_set_model, NULL, NULL, NULL);
4956 object_property_add(obj, "stepping", "int",
4957 x86_cpuid_version_get_stepping,
4958 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
4959 object_property_add_str(obj, "vendor",
4960 x86_cpuid_get_vendor,
4961 x86_cpuid_set_vendor, NULL);
4962 object_property_add_str(obj, "model-id",
4963 x86_cpuid_get_model_id,
4964 x86_cpuid_set_model_id, NULL);
4965 object_property_add(obj, "tsc-frequency", "int",
4966 x86_cpuid_get_tsc_freq,
4967 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
4968 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
4969 x86_cpu_get_feature_words,
4970 NULL, NULL, (void *)env->features, NULL);
4971 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
4972 x86_cpu_get_feature_words,
4973 NULL, NULL, (void *)cpu->filtered_features, NULL);
4975 object_property_add(obj, "crash-information", "GuestPanicInformation",
4976 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
4978 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
4980 for (w = 0; w < FEATURE_WORDS; w++) {
4983 for (bitnr = 0; bitnr < 32; bitnr++) {
4984 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
4988 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
4989 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
4990 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
4991 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
4992 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
4993 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
4994 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
4996 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
4997 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
4998 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
4999 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
5000 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
5001 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
5002 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
5003 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
5004 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
5005 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
5006 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
5007 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
5008 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
5009 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
5010 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
5011 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
5012 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
5013 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
5014 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
5015 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
5016 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
5019 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
5023 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5025 X86CPU *cpu = X86_CPU(cs);
5027 return cpu->apic_id;
5030 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5032 X86CPU *cpu = X86_CPU(cs);
5034 return cpu->env.cr[0] & CR0_PG_MASK;
5037 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5039 X86CPU *cpu = X86_CPU(cs);
5041 cpu->env.eip = value;
5044 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5046 X86CPU *cpu = X86_CPU(cs);
5048 cpu->env.eip = tb->pc - tb->cs_base;
5051 static bool x86_cpu_has_work(CPUState *cs)
5053 X86CPU *cpu = X86_CPU(cs);
5054 CPUX86State *env = &cpu->env;
5056 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
5057 CPU_INTERRUPT_POLL)) &&
5058 (env->eflags & IF_MASK)) ||
5059 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
5060 CPU_INTERRUPT_INIT |
5061 CPU_INTERRUPT_SIPI |
5062 CPU_INTERRUPT_MCE)) ||
5063 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
5064 !(env->hflags & HF_SMM_MASK));
5067 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5069 X86CPU *cpu = X86_CPU(cs);
5070 CPUX86State *env = &cpu->env;
5072 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5073 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5074 : bfd_mach_i386_i8086);
5075 info->print_insn = print_insn_i386;
5077 info->cap_arch = CS_ARCH_X86;
5078 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5079 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5081 info->cap_insn_unit = 1;
5082 info->cap_insn_split = 8;
5085 void x86_update_hflags(CPUX86State *env)
5088 #define HFLAG_COPY_MASK \
5089 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
5090 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
5091 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
5092 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
5094 hflags = env->hflags & HFLAG_COPY_MASK;
5095 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
5096 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
5097 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
5098 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
5099 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
5101 if (env->cr[4] & CR4_OSFXSR_MASK) {
5102 hflags |= HF_OSFXSR_MASK;
5105 if (env->efer & MSR_EFER_LMA) {
5106 hflags |= HF_LMA_MASK;
5109 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
5110 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
5112 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
5113 (DESC_B_SHIFT - HF_CS32_SHIFT);
5114 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
5115 (DESC_B_SHIFT - HF_SS32_SHIFT);
5116 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
5117 !(hflags & HF_CS32_MASK)) {
5118 hflags |= HF_ADDSEG_MASK;
5120 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
5121 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
5124 env->hflags = hflags;
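
/*
 * Worked example (illustrative): with CR0.PE set, EFER.LMA set and a code
 * segment whose descriptor has the L bit set, the block above yields
 * HF_LMA_MASK | HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK on top of the
 * copied bits, i.e. a 64-bit code segment.
 */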
5127 static Property x86_cpu_properties[] = {
5128 #ifdef CONFIG_USER_ONLY
5129 /* apic_id = 0 by default for *-user, see commit 9886e834 */
5130 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
5131 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
5132 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
5133 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
5135 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
5136 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
5137 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
5138 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
5140 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
5141 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
5142 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
5143 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
5144 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
5145 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
5146 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
5147 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
5148 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
5149 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
5150 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
5151 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
5152 DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
5153 DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
5154 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
5155 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
5156 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
5157 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
5158 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
5159 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
5160 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
5161 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
5162 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
5163 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
5164 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
5165 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
5166 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
5167 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
5168 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
5169 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
5170 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
5171 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
5173 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
5174 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
5176 * legacy_cache defaults to true unless the CPU model provides its
5177 * own cache information (see x86_cpu_load_def()).
5179 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
5182 * From "Requirements for Implementing the Microsoft
5183 * Hypervisor Interface":
5184 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
5186 * "Starting with Windows Server 2012 and Windows 8, if
5187 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
5188 * the hypervisor imposes no specific limit to the number of VPs.
5189 * In this case, Windows Server 2012 guest VMs may use more than
5190 * 64 VPs, up to the maximum supported number of processors applicable
5191 * to the specific Windows version being used."
5193 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
5194 DEFINE_PROP_END_OF_LIST()
5197 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
5199 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5200 CPUClass *cc = CPU_CLASS(oc);
5201 DeviceClass *dc = DEVICE_CLASS(oc);
5203 device_class_set_parent_realize(dc, x86_cpu_realizefn,
5204 &xcc->parent_realize);
5205 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
5206 &xcc->parent_unrealize);
5207 dc->props = x86_cpu_properties;
5209 xcc->parent_reset = cc->reset;
5210 cc->reset = x86_cpu_reset;
5211 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
5213 cc->class_by_name = x86_cpu_class_by_name;
5214 cc->parse_features = x86_cpu_parse_featurestr;
5215 cc->has_work = x86_cpu_has_work;
5217 cc->do_interrupt = x86_cpu_do_interrupt;
5218 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
5220 cc->dump_state = x86_cpu_dump_state;
5221 cc->get_crash_info = x86_cpu_get_crash_info;
5222 cc->set_pc = x86_cpu_set_pc;
5223 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
5224 cc->gdb_read_register = x86_cpu_gdb_read_register;
5225 cc->gdb_write_register = x86_cpu_gdb_write_register;
5226 cc->get_arch_id = x86_cpu_get_arch_id;
5227 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
5228 #ifdef CONFIG_USER_ONLY
5229 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
5231 cc->asidx_from_attrs = x86_asidx_from_attrs;
5232 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
5233 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
5234 cc->write_elf64_note = x86_cpu_write_elf64_note;
5235 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
5236 cc->write_elf32_note = x86_cpu_write_elf32_note;
5237 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
5238 cc->vmsd = &vmstate_x86_cpu;
5240 cc->gdb_arch_name = x86_gdb_arch_name;
5241 #ifdef TARGET_X86_64
5242 cc->gdb_core_xml_file = "i386-64bit.xml";
5243 cc->gdb_num_core_regs = 57;
5245 cc->gdb_core_xml_file = "i386-32bit.xml";
5246 cc->gdb_num_core_regs = 41;
5248 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
5249 cc->debug_excp_handler = breakpoint_handler;
5251 cc->cpu_exec_enter = x86_cpu_exec_enter;
5252 cc->cpu_exec_exit = x86_cpu_exec_exit;
5254 cc->tcg_initialize = tcg_x86_init;
5256 cc->disas_set_info = x86_disas_set_info;
5258 dc->user_creatable = true;
5261 static const TypeInfo x86_cpu_type_info = {
5262 .name = TYPE_X86_CPU,
5264 .instance_size = sizeof(X86CPU),
5265 .instance_init = x86_cpu_initfn,
5267 .class_size = sizeof(X86CPUClass),
5268 .class_init = x86_cpu_common_class_init,
5272 /* "base" CPU model, used by query-cpu-model-expansion */
5273 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
5275 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5277 xcc->static_model = true;
5278 xcc->migration_safe = true;
5279 xcc->model_description = "base CPU model type with no features enabled";
5283 static const TypeInfo x86_base_cpu_type_info = {
5284 .name = X86_CPU_TYPE_NAME("base"),
5285 .parent = TYPE_X86_CPU,
5286 .class_init = x86_cpu_base_class_init,
5289 static void x86_cpu_register_types(void)
5293 type_register_static(&x86_cpu_type_info);
5294 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5295 x86_register_cpudef_type(&builtin_x86_defs[i]);
5297 type_register_static(&max_x86_cpu_type_info);
5298 type_register_static(&x86_base_cpu_type_info);
5299 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5300 type_register_static(&host_x86_cpu_type_info);
5304 type_init(x86_cpu_register_types)