/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
24 #include "exec/exec-all.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/hvf.h"
27 #include "sysemu/cpus.h"
31 #include "qemu/error-report.h"
32 #include "qemu/option.h"
33 #include "qemu/config-file.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-visit-misc.h"
36 #include "qapi/qapi-visit-run-state.h"
37 #include "qapi/qmp/qdict.h"
38 #include "qapi/qmp/qerror.h"
39 #include "qapi/visitor.h"
40 #include "qom/qom-qobject.h"
41 #include "sysemu/arch_init.h"
43 #include "standard-headers/asm-x86/kvm_para.h"
45 #include "sysemu/sysemu.h"
46 #include "hw/qdev-properties.h"
47 #include "hw/i386/topology.h"
48 #ifndef CONFIG_USER_ONLY
49 #include "exec/address-spaces.h"
51 #include "hw/xen/xen.h"
52 #include "hw/i386/apic_internal.h"
55 #include "disas/capstone.h"
57 /* Helpers for building CPUID[2] descriptors: */
59 struct CPUID2CacheDescriptorInfo {
/* Size units used by the cache descriptor tables below. */
#define KiB 1024
#define MiB (1024 * 1024)
71 * Known CPUID 2 cache descriptors.
72 * From Intel SDM Volume 2A, CPUID instruction
74 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
75 [0x06] = { .level = 1, .type = ICACHE, .size = 8 * KiB,
76 .associativity = 4, .line_size = 32, },
77 [0x08] = { .level = 1, .type = ICACHE, .size = 16 * KiB,
78 .associativity = 4, .line_size = 32, },
79 [0x09] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
80 .associativity = 4, .line_size = 64, },
81 [0x0A] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
82 .associativity = 2, .line_size = 32, },
83 [0x0C] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
84 .associativity = 4, .line_size = 32, },
85 [0x0D] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
86 .associativity = 4, .line_size = 64, },
87 [0x0E] = { .level = 1, .type = DCACHE, .size = 24 * KiB,
88 .associativity = 6, .line_size = 64, },
89 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
90 .associativity = 2, .line_size = 64, },
91 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
92 .associativity = 8, .line_size = 64, },
93 /* lines per sector is not supported cpuid2_cache_descriptor(),
94 * so descriptors 0x22, 0x23 are not included
96 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
97 .associativity = 16, .line_size = 64, },
98 /* lines per sector is not supported cpuid2_cache_descriptor(),
99 * so descriptors 0x25, 0x20 are not included
101 [0x2C] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
102 .associativity = 8, .line_size = 64, },
103 [0x30] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
104 .associativity = 8, .line_size = 64, },
105 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
106 .associativity = 4, .line_size = 32, },
107 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
108 .associativity = 4, .line_size = 32, },
109 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
112 .associativity = 4, .line_size = 32, },
113 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
114 .associativity = 4, .line_size = 32, },
115 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
116 .associativity = 4, .line_size = 64, },
117 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
118 .associativity = 8, .line_size = 64, },
119 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
120 .associativity = 12, .line_size = 64, },
121 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
122 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
123 .associativity = 12, .line_size = 64, },
124 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
125 .associativity = 16, .line_size = 64, },
126 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
131 .associativity = 24, .line_size = 64, },
132 [0x60] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
133 .associativity = 8, .line_size = 64, },
134 [0x66] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
135 .associativity = 4, .line_size = 64, },
136 [0x67] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
137 .associativity = 4, .line_size = 64, },
138 [0x68] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
141 .associativity = 4, .line_size = 64, },
142 /* lines per sector is not supported cpuid2_cache_descriptor(),
143 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
145 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
146 .associativity = 8, .line_size = 64, },
147 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
148 .associativity = 2, .line_size = 64, },
149 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
150 .associativity = 8, .line_size = 64, },
151 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
152 .associativity = 8, .line_size = 32, },
153 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 32, },
155 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
156 .associativity = 8, .line_size = 32, },
157 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
158 .associativity = 8, .line_size = 32, },
159 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
160 .associativity = 4, .line_size = 64, },
161 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
162 .associativity = 8, .line_size = 64, },
163 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 4, .line_size = 64, },
167 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 8, .line_size = 64, },
171 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 8, .line_size = 64, },
173 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
176 .associativity = 12, .line_size = 64, },
177 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
178 .associativity = 12, .line_size = 64, },
179 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
182 .associativity = 16, .line_size = 64, },
183 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
184 .associativity = 16, .line_size = 64, },
185 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
188 .associativity = 24, .line_size = 64, },
189 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
190 .associativity = 24, .line_size = 64, },
191 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
192 .associativity = 24, .line_size = 64, },
/*
 * Descriptor meaning (per the Intel SDM):
 * "CPUID leaf 2 does not report cache descriptor information,
 * use CPUID leaf 4 to query cache parameters"
 */
#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
202 * Return a CPUID 2 cache descriptor for a given cache.
203 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
205 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
209 assert(cache->size > 0);
210 assert(cache->level > 0);
211 assert(cache->line_size > 0);
212 assert(cache->associativity > 0);
213 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
214 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
215 if (d->level == cache->level && d->type == cache->type &&
216 d->size == cache->size && d->line_size == cache->line_size &&
217 d->associativity == cache->associativity) {
222 return CACHE_DESCRIPTOR_UNAVAILABLE;
/* CPUID Leaf 4 constants: */

/* EAX: */
#define CACHE_TYPE_D    1
#define CACHE_TYPE_I    2
#define CACHE_TYPE_UNIFIED   3
/* Parenthesize the argument so expressions like CACHE_LEVEL(x + 1) work */
#define CACHE_LEVEL(l)        ((l) << 5)
#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING   (1 << 0)
#define CACHE_INCLUSIVE         (1 << 1)
#define CACHE_COMPLEX_IDX       (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DCACHE) ? CACHE_TYPE_D : \
                       ((t) == ICACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)
248 /* Encode cache info for CPUID[4] */
249 static void encode_cache_cpuid4(CPUCacheInfo *cache,
250 int num_apic_ids, int num_cores,
251 uint32_t *eax, uint32_t *ebx,
252 uint32_t *ecx, uint32_t *edx)
254 assert(cache->size == cache->line_size * cache->associativity *
255 cache->partitions * cache->sets);
257 assert(num_apic_ids > 0);
258 *eax = CACHE_TYPE(cache->type) |
259 CACHE_LEVEL(cache->level) |
260 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
261 ((num_cores - 1) << 26) |
262 ((num_apic_ids - 1) << 14);
264 assert(cache->line_size > 0);
265 assert(cache->partitions > 0);
266 assert(cache->associativity > 0);
267 /* We don't implement fully-associative caches */
268 assert(cache->associativity < cache->sets);
269 *ebx = (cache->line_size - 1) |
270 ((cache->partitions - 1) << 12) |
271 ((cache->associativity - 1) << 22);
273 assert(cache->sets > 0);
274 *ecx = cache->sets - 1;
276 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
277 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
278 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
281 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
282 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
284 assert(cache->size % 1024 == 0);
285 assert(cache->lines_per_tag > 0);
286 assert(cache->associativity > 0);
287 assert(cache->line_size > 0);
288 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
289 (cache->lines_per_tag << 8) | (cache->line_size);
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a)  : \
                          (a) ==   2 ? 0x2  : \
                          (a) ==   4 ? 0x4  : \
                          (a) ==   8 ? 0x6  : \
                          (a) ==  16 ? 0x8  : \
                          (a) ==  32 ? 0xA  : \
                          (a) ==  48 ? 0xB  : \
                          (a) ==  64 ? 0xC  : \
                          (a) ==  96 ? 0xD  : \
                          (a) == 128 ? 0xE  : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
309 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
312 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
314 uint32_t *ecx, uint32_t *edx)
316 assert(l2->size % 1024 == 0);
317 assert(l2->associativity > 0);
318 assert(l2->lines_per_tag > 0);
319 assert(l2->line_size > 0);
320 *ecx = ((l2->size / 1024) << 16) |
321 (AMD_ENC_ASSOC(l2->associativity) << 12) |
322 (l2->lines_per_tag << 8) | (l2->line_size);
325 assert(l3->size % (512 * 1024) == 0);
326 assert(l3->associativity > 0);
327 assert(l3->lines_per_tag > 0);
328 assert(l3->line_size > 0);
329 *edx = ((l3->size / (512 * 1024)) << 18) |
330 (AMD_ENC_ASSOC(l3->associativity) << 12) |
331 (l3->lines_per_tag << 8) | (l3->line_size);
/*
 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
 * Please refer to the AMD64 Architecture Programmer's Manual Volume 3.
 * Define the constants to build the cpu topology. Right now, TOPOEXT
 * feature is enabled only on EPYC. So, these constants are based on
 * EPYC supported configurations. We may need to handle the cases if
 * these values change in future.
 */
/* Maximum core complexes in a node */
#define MAX_CCX 2
/* Maximum cores in a core complex */
#define MAX_CORES_IN_CCX 4
/* Maximum cores in a node (MAX_CCX * MAX_CORES_IN_CCX) */
#define MAX_CORES_IN_NODE 8
/* Maximum nodes in a socket */
#define MAX_NODES_PER_SOCKET 4
355 * Figure out the number of nodes required to build this config.
356 * Max cores in a node is 8
358 static int nodes_in_socket(int nr_cores)
362 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
364 /* Hardware does not support config with 3 nodes, return 4 in that case */
365 return (nodes == 3) ? 4 : nodes;
369 * Decide the number of cores in a core complex with the given nr_cores using
370 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
371 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
372 * L3 cache is shared across all cores in a core complex. So, this will also
373 * tell us how many cores are sharing the L3 cache.
375 static int cores_in_core_complex(int nr_cores)
379 /* Check if we can fit all the cores in one core complex */
380 if (nr_cores <= MAX_CORES_IN_CCX) {
383 /* Get the number of nodes required to build this config */
384 nodes = nodes_in_socket(nr_cores);
387 * Divide the cores accros all the core complexes
388 * Return rounded up value
390 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
393 /* Encode cache info for CPUID[8000001D] */
394 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
395 uint32_t *eax, uint32_t *ebx,
396 uint32_t *ecx, uint32_t *edx)
399 assert(cache->size == cache->line_size * cache->associativity *
400 cache->partitions * cache->sets);
402 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
403 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
405 /* L3 is shared among multiple cores */
406 if (cache->level == 3) {
407 l3_cores = cores_in_core_complex(cs->nr_cores);
408 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
410 *eax |= ((cs->nr_threads - 1) << 14);
413 assert(cache->line_size > 0);
414 assert(cache->partitions > 0);
415 assert(cache->associativity > 0);
416 /* We don't implement fully-associative caches */
417 assert(cache->associativity < cache->sets);
418 *ebx = (cache->line_size - 1) |
419 ((cache->partitions - 1) << 12) |
420 ((cache->associativity - 1) << 22);
422 assert(cache->sets > 0);
423 *ecx = cache->sets - 1;
425 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
426 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
427 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */
437 static CPUCacheInfo legacy_l1d_cache = {
446 .no_invd_sharing = true,
449 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
450 static CPUCacheInfo legacy_l1d_cache_amd = {
460 .no_invd_sharing = true,
463 /* L1 instruction cache: */
464 static CPUCacheInfo legacy_l1i_cache = {
473 .no_invd_sharing = true,
476 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
477 static CPUCacheInfo legacy_l1i_cache_amd = {
487 .no_invd_sharing = true,
490 /* Level 2 unified cache: */
491 static CPUCacheInfo legacy_l2_cache = {
492 .type = UNIFIED_CACHE,
500 .no_invd_sharing = true,
503 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
504 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
505 .type = UNIFIED_CACHE,
513 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
514 static CPUCacheInfo legacy_l2_cache_amd = {
515 .type = UNIFIED_CACHE,
525 /* Level 3 unified cache: */
526 static CPUCacheInfo legacy_l3_cache = {
527 .type = UNIFIED_CACHE,
537 .complex_indexing = true,
540 /* TLB definitions: */
542 #define L1_DTLB_2M_ASSOC 1
543 #define L1_DTLB_2M_ENTRIES 255
544 #define L1_DTLB_4K_ASSOC 1
545 #define L1_DTLB_4K_ENTRIES 255
547 #define L1_ITLB_2M_ASSOC 1
548 #define L1_ITLB_2M_ENTRIES 255
549 #define L1_ITLB_4K_ASSOC 1
550 #define L1_ITLB_4K_ENTRIES 255
552 #define L2_DTLB_2M_ASSOC 0 /* disabled */
553 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
554 #define L2_DTLB_4K_ASSOC 4
555 #define L2_DTLB_4K_ENTRIES 512
557 #define L2_ITLB_2M_ASSOC 0 /* disabled */
558 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
559 #define L2_ITLB_4K_ASSOC 4
560 #define L2_ITLB_4K_ENTRIES 512
/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
/* generated packets which contain IP payloads have LIP values */
#define INTEL_PT_IP_LIP          (1u << 31) /* unsigned: 1 << 31 overflows int */
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
591 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
592 uint32_t vendor2, uint32_t vendor3)
595 for (i = 0; i < 4; i++) {
596 dst[i] = vendor1 >> (8 * i);
597 dst[i + 4] = vendor2 >> (8 * i);
598 dst[i + 8] = vendor3 >> (8 * i);
600 dst[CPUID_VENDOR_SZ] = '\0';
603 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
604 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
605 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
606 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
607 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
608 CPUID_PSE36 | CPUID_FXSR)
609 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
610 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
611 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
612 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
613 CPUID_PAE | CPUID_SEP | CPUID_APIC)
615 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
616 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
617 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
618 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
619 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
620 /* partly implemented:
621 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
623 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
624 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
625 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
626 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
627 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
628 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
630 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
631 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
632 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
633 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
634 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
637 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
639 #define TCG_EXT2_X86_64_FEATURES 0
642 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
643 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
644 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
645 TCG_EXT2_X86_64_FEATURES)
646 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
647 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
648 #define TCG_EXT4_FEATURES 0
649 #define TCG_SVM_FEATURES 0
650 #define TCG_KVM_FEATURES 0
651 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
652 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
653 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
654 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
657 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
658 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
659 CPUID_7_0_EBX_RDSEED */
660 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
662 #define TCG_7_0_EDX_FEATURES 0
663 #define TCG_APM_FEATURES 0
664 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
665 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
667 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
} FeatureWordInfo;
687 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
690 "fpu", "vme", "de", "pse",
691 "tsc", "msr", "pae", "mce",
692 "cx8", "apic", NULL, "sep",
693 "mtrr", "pge", "mca", "cmov",
694 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
695 NULL, "ds" /* Intel dts */, "acpi", "mmx",
696 "fxsr", "sse", "sse2", "ss",
697 "ht" /* Intel htt */, "tm", "ia64", "pbe",
699 .cpuid_eax = 1, .cpuid_reg = R_EDX,
700 .tcg_features = TCG_FEATURES,
704 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
705 "ds-cpl", "vmx", "smx", "est",
706 "tm2", "ssse3", "cid", NULL,
707 "fma", "cx16", "xtpr", "pdcm",
708 NULL, "pcid", "dca", "sse4.1",
709 "sse4.2", "x2apic", "movbe", "popcnt",
710 "tsc-deadline", "aes", "xsave", "osxsave",
711 "avx", "f16c", "rdrand", "hypervisor",
713 .cpuid_eax = 1, .cpuid_reg = R_ECX,
714 .tcg_features = TCG_EXT_FEATURES,
716 /* Feature names that are already defined on feature_name[] but
717 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
718 * names on feat_names below. They are copied automatically
719 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
721 [FEAT_8000_0001_EDX] = {
723 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
724 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
725 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
726 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
727 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
728 "nx", NULL, "mmxext", NULL /* mmx */,
729 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
730 NULL, "lm", "3dnowext", "3dnow",
732 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
733 .tcg_features = TCG_EXT2_FEATURES,
735 [FEAT_8000_0001_ECX] = {
737 "lahf-lm", "cmp-legacy", "svm", "extapic",
738 "cr8legacy", "abm", "sse4a", "misalignsse",
739 "3dnowprefetch", "osvw", "ibs", "xop",
740 "skinit", "wdt", NULL, "lwp",
741 "fma4", "tce", NULL, "nodeid-msr",
742 NULL, "tbm", "topoext", "perfctr-core",
743 "perfctr-nb", NULL, NULL, NULL,
744 NULL, NULL, NULL, NULL,
746 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
747 .tcg_features = TCG_EXT3_FEATURES,
749 [FEAT_C000_0001_EDX] = {
751 NULL, NULL, "xstore", "xstore-en",
752 NULL, NULL, "xcrypt", "xcrypt-en",
753 "ace2", "ace2-en", "phe", "phe-en",
754 "pmm", "pmm-en", NULL, NULL,
755 NULL, NULL, NULL, NULL,
756 NULL, NULL, NULL, NULL,
757 NULL, NULL, NULL, NULL,
758 NULL, NULL, NULL, NULL,
760 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
761 .tcg_features = TCG_EXT4_FEATURES,
765 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
766 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
767 NULL, "kvm-pv-tlb-flush", NULL, NULL,
768 NULL, NULL, NULL, NULL,
769 NULL, NULL, NULL, NULL,
770 NULL, NULL, NULL, NULL,
771 "kvmclock-stable-bit", NULL, NULL, NULL,
772 NULL, NULL, NULL, NULL,
774 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
775 .tcg_features = TCG_KVM_FEATURES,
779 "kvm-hint-dedicated", NULL, NULL, NULL,
780 NULL, NULL, NULL, NULL,
781 NULL, NULL, NULL, NULL,
782 NULL, NULL, NULL, NULL,
783 NULL, NULL, NULL, NULL,
784 NULL, NULL, NULL, NULL,
785 NULL, NULL, NULL, NULL,
786 NULL, NULL, NULL, NULL,
788 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
789 .tcg_features = TCG_KVM_FEATURES,
791 * KVM hints aren't auto-enabled by -cpu host, they need to be
792 * explicitly enabled in the command-line.
794 .no_autoenable_flags = ~0U,
796 [FEAT_HYPERV_EAX] = {
798 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
799 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
800 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
801 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
802 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
803 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
804 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
806 NULL, NULL, NULL, NULL,
807 NULL, NULL, NULL, NULL,
808 NULL, NULL, NULL, NULL,
809 NULL, NULL, NULL, NULL,
811 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
813 [FEAT_HYPERV_EBX] = {
815 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
816 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
817 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
818 NULL /* hv_create_port */, NULL /* hv_connect_port */,
819 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
820 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
822 NULL, NULL, NULL, NULL,
823 NULL, NULL, NULL, NULL,
824 NULL, NULL, NULL, NULL,
825 NULL, NULL, NULL, NULL,
827 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
829 [FEAT_HYPERV_EDX] = {
831 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
832 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
833 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
835 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
836 NULL, NULL, NULL, NULL,
837 NULL, NULL, NULL, NULL,
838 NULL, NULL, NULL, NULL,
839 NULL, NULL, NULL, NULL,
840 NULL, NULL, NULL, NULL,
842 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
846 "npt", "lbrv", "svm-lock", "nrip-save",
847 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
848 NULL, NULL, "pause-filter", NULL,
849 "pfthreshold", NULL, NULL, NULL,
850 NULL, NULL, NULL, NULL,
851 NULL, NULL, NULL, NULL,
852 NULL, NULL, NULL, NULL,
853 NULL, NULL, NULL, NULL,
855 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
856 .tcg_features = TCG_SVM_FEATURES,
860 "fsgsbase", "tsc-adjust", NULL, "bmi1",
861 "hle", "avx2", NULL, "smep",
862 "bmi2", "erms", "invpcid", "rtm",
863 NULL, NULL, "mpx", NULL,
864 "avx512f", "avx512dq", "rdseed", "adx",
865 "smap", "avx512ifma", "pcommit", "clflushopt",
866 "clwb", "intel-pt", "avx512pf", "avx512er",
867 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
870 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
872 .tcg_features = TCG_7_0_EBX_FEATURES,
876 NULL, "avx512vbmi", "umip", "pku",
877 "ospke", NULL, "avx512vbmi2", NULL,
878 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
879 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
880 "la57", NULL, NULL, NULL,
881 NULL, NULL, "rdpid", NULL,
882 NULL, "cldemote", NULL, NULL,
883 NULL, NULL, NULL, NULL,
886 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
888 .tcg_features = TCG_7_0_ECX_FEATURES,
892 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
893 NULL, NULL, NULL, NULL,
894 NULL, NULL, NULL, NULL,
895 NULL, NULL, NULL, NULL,
896 NULL, NULL, NULL, NULL,
897 NULL, NULL, NULL, NULL,
898 NULL, NULL, "spec-ctrl", NULL,
899 NULL, NULL, NULL, "ssbd",
902 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
904 .tcg_features = TCG_7_0_EDX_FEATURES,
906 [FEAT_8000_0007_EDX] = {
908 NULL, NULL, NULL, NULL,
909 NULL, NULL, NULL, NULL,
910 "invtsc", NULL, NULL, NULL,
911 NULL, NULL, NULL, NULL,
912 NULL, NULL, NULL, NULL,
913 NULL, NULL, NULL, NULL,
914 NULL, NULL, NULL, NULL,
915 NULL, NULL, NULL, NULL,
917 .cpuid_eax = 0x80000007,
919 .tcg_features = TCG_APM_FEATURES,
920 .unmigratable_flags = CPUID_APM_INVTSC,
922 [FEAT_8000_0008_EBX] = {
924 NULL, NULL, NULL, NULL,
925 NULL, NULL, NULL, NULL,
926 NULL, NULL, NULL, NULL,
927 "ibpb", NULL, NULL, NULL,
928 NULL, NULL, NULL, NULL,
929 NULL, NULL, NULL, NULL,
930 NULL, "virt-ssbd", NULL, NULL,
931 NULL, NULL, NULL, NULL,
933 .cpuid_eax = 0x80000008,
936 .unmigratable_flags = 0,
940 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
941 NULL, NULL, NULL, NULL,
942 NULL, NULL, NULL, NULL,
943 NULL, NULL, NULL, NULL,
944 NULL, NULL, NULL, NULL,
945 NULL, NULL, NULL, NULL,
946 NULL, NULL, NULL, NULL,
947 NULL, NULL, NULL, NULL,
950 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
952 .tcg_features = TCG_XSAVE_FEATURES,
956 NULL, NULL, "arat", NULL,
957 NULL, NULL, NULL, NULL,
958 NULL, NULL, NULL, NULL,
959 NULL, NULL, NULL, NULL,
960 NULL, NULL, NULL, NULL,
961 NULL, NULL, NULL, NULL,
962 NULL, NULL, NULL, NULL,
963 NULL, NULL, NULL, NULL,
965 .cpuid_eax = 6, .cpuid_reg = R_EAX,
966 .tcg_features = TCG_6_EAX_FEATURES,
968 [FEAT_XSAVE_COMP_LO] = {
970 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
973 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
974 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
975 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
978 [FEAT_XSAVE_COMP_HI] = {
980 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
986 typedef struct X86RegisterInfo32 {
987 /* Name of register */
989 /* QAPI enum value register */
990 X86CPURegister32 qapi_enum;
993 #define REGISTER(reg) \
994 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
995 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
/* One XSAVE state component: which CPUID feature enables it, and its
 * offset/size within the XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;  /* FeatureWord index and required flag bits */
    uint32_t offset, size;   /* location within the XSAVE area, in bytes */
} ExtSaveArea;
/*
 * Table of XSAVE state components, indexed by XSTATE_*_BIT.  Each entry
 * records the CPUID feature bit that enables the component and where that
 * component's state lives (offset/size) in the X86XSaveArea layout.
 */
1012 static const ExtSaveArea x86_ext_save_areas[] = {
1014 /* x87 FP state component is always enabled if XSAVE is supported */
1015 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1016 /* x87 state is in the legacy region of the XSAVE area */
1018 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1020 [XSTATE_SSE_BIT] = {
1021 /* SSE state component is always enabled if XSAVE is supported */
1022 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1023 /* SSE state is in the legacy region of the XSAVE area */
1025 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
/* AVX: upper halves of the YMM registers */
1028 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1029 .offset = offsetof(X86XSaveArea, avx_state),
1030 .size = sizeof(XSaveAVX) },
/* MPX: bound registers and bound config/status, gated by the same bit */
1031 [XSTATE_BNDREGS_BIT] =
1032 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1033 .offset = offsetof(X86XSaveArea, bndreg_state),
1034 .size = sizeof(XSaveBNDREG) },
1035 [XSTATE_BNDCSR_BIT] =
1036 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1037 .offset = offsetof(X86XSaveArea, bndcsr_state),
1038 .size = sizeof(XSaveBNDCSR) },
/* AVX-512: opmask, ZMM0-15 upper halves, ZMM16-31 — all gated by AVX512F */
1039 [XSTATE_OPMASK_BIT] =
1040 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1041 .offset = offsetof(X86XSaveArea, opmask_state),
1042 .size = sizeof(XSaveOpmask) },
1043 [XSTATE_ZMM_Hi256_BIT] =
1044 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1045 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1046 .size = sizeof(XSaveZMM_Hi256) },
1047 [XSTATE_Hi16_ZMM_BIT] =
1048 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1049 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1050 .size = sizeof(XSaveHi16_ZMM) },
/* Protection-key rights register state */
1052 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1053 .offset = offsetof(X86XSaveArea, pkru_state),
1054 .size = sizeof(XSavePKRU) },
/*
 * Compute the XSAVE buffer size (in bytes) needed for the set of state
 * components selected by @mask: the maximum end offset (offset + size)
 * over every component whose bit is set.
 */
1057 static uint32_t xsave_area_size(uint64_t mask)
1062 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1063 const ExtSaveArea *esa = &x86_ext_save_areas[i];
/* Only components enabled in @mask contribute to the required size. */
1064 if ((mask >> i) & 1) {
1065 ret = MAX(ret, esa->offset + esa->size);
/* True when the active accelerator is KVM or HVF, i.e. one that derives
 * guest CPUID information from the host CPU. */
1071 static inline bool accel_uses_host_cpuid(void)
1073 return kvm_enabled() || hvf_enabled();
/* Combine the FEAT_XSAVE_COMP_HI/LO feature words into a single 64-bit
 * XSAVE component bitmap for @cpu. */
1076 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1078 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1079 cpu->env.features[FEAT_XSAVE_COMP_LO];
/*
 * Return the printable name of the 32-bit register with index @reg.
 * Out-of-range indexes take the guarded branch below (NOTE(review): its
 * return value is not in this listing — presumably NULL; confirm).
 */
1082 const char *get_register_name_32(unsigned int reg)
1084 if (reg >= CPU_NB_REGS32) {
1087 return x86_reg_info_32[reg].name;
1091 * Returns the set of feature flags that are supported and migratable by
1092 * QEMU, for a given FeatureWord.
1094 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1096 FeatureWordInfo *wi = &feature_word_info[w];
/* Test each of the 32 bits of feature word @w individually. */
1100 for (i = 0; i < 32; i++) {
1101 uint32_t f = 1U << i;
1103 /* If the feature name is known, it is implicitly considered migratable,
1104 * unless it is explicitly set in unmigratable_flags */
1105 if ((wi->migratable_flags & f) ||
1106 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
/*
 * Execute the CPUID instruction on the host with leaf @function and
 * subleaf @count, storing the resulting EAX/EBX/ECX/EDX through the
 * corresponding output pointers.
 */
1113 void host_cpuid(uint32_t function, uint32_t count,
1114 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
/* 64-bit variant: CPUID outputs read directly from the four GPRs. */
1119 asm volatile("cpuid"
1120 : "=a"(vec[0]), "=b"(vec[1]),
1121 "=c"(vec[2]), "=d"(vec[3])
1122 : "0"(function), "c"(count) : "cc");
1123 #elif defined(__i386__)
/* 32-bit variant: save all GPRs with pusha, then store CPUID results
 * through %esi into vec[] (EBX cannot be a direct output when it is the
 * PIC base register). */
1124 asm volatile("pusha \n\t"
1126 "mov %%eax, 0(%2) \n\t"
1127 "mov %%ebx, 4(%2) \n\t"
1128 "mov %%ecx, 8(%2) \n\t"
1129 "mov %%edx, 12(%2) \n\t"
1131 : : "a"(function), "c"(count), "S"(vec)
/*
 * Query the host CPU: CPUID leaf 0 yields the 12-character vendor string
 * (written into @vendor), leaf 1 EAX yields family/model/stepping with the
 * extended-family and extended-model adjustments applied.
 */
1147 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1149 uint32_t eax, ebx, ecx, edx;
1151 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1152 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1154 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
/* family = base family (bits 11:8) + extended family (bits 27:20) */
1156 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
/* model = base model (bits 7:4) with extended model (bits 19:16) above it */
1159 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
/* stepping = bits 3:0 */
1162 *stepping = eax & 0x0F;
1166 /* CPU class name definitions: */
1168 /* Return type name for a given CPU model name
1169 * Caller is responsible for freeing the returned string.
1171 static char *x86_cpu_type_name(const char *model_name)
/* Expand "model" into the full QOM type name via X86_CPU_TYPE_NAME(). */
1173 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
/* Look up the QOM ObjectClass corresponding to CPU model name @cpu_model. */
1176 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
/* Translate the model name to its QOM type name, then resolve the class. */
1179 char *typename = x86_cpu_type_name(cpu_model);
1180 oc = object_class_by_name(typename);
/*
 * Return the CPU model name for class @cc by stripping the
 * X86_CPU_TYPE_SUFFIX from its QOM class name.  Returns a newly
 * allocated string; the caller must free it.
 */
1185 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1187 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
/* Every X86 CPU class name must end with the expected suffix. */
1188 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX))
1189 return g_strndup(class_name,
1190 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
/*
 * Static definition of one built-in CPU model: CPUID vendor string,
 * feature-word values, marketing model string and optional cache topology.
 */
1193 struct X86CPUDefinition {
1197 /* vendor is zero-terminated, 12 character ASCII string */
1198 char vendor[CPUID_VENDOR_SZ + 1];
1202 FeatureWordArray features; /* initial value of every feature word */
1203 const char *model_id;      /* CPUID brand / model-id string */
1204 CPUCaches *cache_info;     /* optional explicit cache hierarchy */
/*
 * Cache hierarchy (L1D/L1I/L2/L3) reported to guests for the AMD EPYC
 * CPU models.
 */
1207 static CPUCaches epyc_cache_info = {
1208 .l1d_cache = &(CPUCacheInfo) {
1218 .no_invd_sharing = true,
1220 .l1i_cache = &(CPUCacheInfo) {
1230 .no_invd_sharing = true,
1232 .l2_cache = &(CPUCacheInfo) {
1233 .type = UNIFIED_CACHE,
1242 .l3_cache = &(CPUCacheInfo) {
1243 .type = UNIFIED_CACHE,
1247 .associativity = 16,
1253 .complex_indexing = true,
1257 static X86CPUDefinition builtin_x86_defs[] = {
1261 .vendor = CPUID_VENDOR_AMD,
1265 .features[FEAT_1_EDX] =
1267 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1269 .features[FEAT_1_ECX] =
1270 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1271 .features[FEAT_8000_0001_EDX] =
1272 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1273 .features[FEAT_8000_0001_ECX] =
1274 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1275 .xlevel = 0x8000000A,
1276 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1281 .vendor = CPUID_VENDOR_AMD,
1285 /* Missing: CPUID_HT */
1286 .features[FEAT_1_EDX] =
1288 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1289 CPUID_PSE36 | CPUID_VME,
1290 .features[FEAT_1_ECX] =
1291 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1293 .features[FEAT_8000_0001_EDX] =
1294 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1295 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1296 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1297 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1299 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1300 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1301 .features[FEAT_8000_0001_ECX] =
1302 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1303 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1304 /* Missing: CPUID_SVM_LBRV */
1305 .features[FEAT_SVM] =
1307 .xlevel = 0x8000001A,
1308 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1313 .vendor = CPUID_VENDOR_INTEL,
1317 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1318 .features[FEAT_1_EDX] =
1320 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1321 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1322 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1323 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1324 .features[FEAT_1_ECX] =
1325 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1327 .features[FEAT_8000_0001_EDX] =
1328 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1329 .features[FEAT_8000_0001_ECX] =
1331 .xlevel = 0x80000008,
1332 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1337 .vendor = CPUID_VENDOR_INTEL,
1341 /* Missing: CPUID_HT */
1342 .features[FEAT_1_EDX] =
1343 PPRO_FEATURES | CPUID_VME |
1344 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1346 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1347 .features[FEAT_1_ECX] =
1348 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1349 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1350 .features[FEAT_8000_0001_EDX] =
1351 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1352 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1353 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1354 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1355 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1356 .features[FEAT_8000_0001_ECX] =
1358 .xlevel = 0x80000008,
1359 .model_id = "Common KVM processor"
1364 .vendor = CPUID_VENDOR_INTEL,
1368 .features[FEAT_1_EDX] =
1370 .features[FEAT_1_ECX] =
1372 .xlevel = 0x80000004,
1373 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1378 .vendor = CPUID_VENDOR_INTEL,
1382 .features[FEAT_1_EDX] =
1383 PPRO_FEATURES | CPUID_VME |
1384 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1385 .features[FEAT_1_ECX] =
1387 .features[FEAT_8000_0001_ECX] =
1389 .xlevel = 0x80000008,
1390 .model_id = "Common 32-bit KVM processor"
1395 .vendor = CPUID_VENDOR_INTEL,
1399 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1400 .features[FEAT_1_EDX] =
1401 PPRO_FEATURES | CPUID_VME |
1402 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1404 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1405 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1406 .features[FEAT_1_ECX] =
1407 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1408 .features[FEAT_8000_0001_EDX] =
1410 .xlevel = 0x80000008,
1411 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1416 .vendor = CPUID_VENDOR_INTEL,
1420 .features[FEAT_1_EDX] =
1428 .vendor = CPUID_VENDOR_INTEL,
1432 .features[FEAT_1_EDX] =
1440 .vendor = CPUID_VENDOR_INTEL,
1444 .features[FEAT_1_EDX] =
1452 .vendor = CPUID_VENDOR_INTEL,
1456 .features[FEAT_1_EDX] =
1464 .vendor = CPUID_VENDOR_AMD,
1468 .features[FEAT_1_EDX] =
1469 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1471 .features[FEAT_8000_0001_EDX] =
1472 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1473 .xlevel = 0x80000008,
1474 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1479 .vendor = CPUID_VENDOR_INTEL,
1483 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1484 .features[FEAT_1_EDX] =
1486 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1487 CPUID_ACPI | CPUID_SS,
1488 /* Some CPUs got no CPUID_SEP */
1489 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1491 .features[FEAT_1_ECX] =
1492 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1494 .features[FEAT_8000_0001_EDX] =
1496 .features[FEAT_8000_0001_ECX] =
1498 .xlevel = 0x80000008,
1499 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1504 .vendor = CPUID_VENDOR_INTEL,
1508 .features[FEAT_1_EDX] =
1509 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1510 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1511 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1512 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1513 CPUID_DE | CPUID_FP87,
1514 .features[FEAT_1_ECX] =
1515 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1516 .features[FEAT_8000_0001_EDX] =
1517 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1518 .features[FEAT_8000_0001_ECX] =
1520 .xlevel = 0x80000008,
1521 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1526 .vendor = CPUID_VENDOR_INTEL,
1530 .features[FEAT_1_EDX] =
1531 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1532 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1533 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1534 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1535 CPUID_DE | CPUID_FP87,
1536 .features[FEAT_1_ECX] =
1537 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1539 .features[FEAT_8000_0001_EDX] =
1540 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1541 .features[FEAT_8000_0001_ECX] =
1543 .xlevel = 0x80000008,
1544 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1549 .vendor = CPUID_VENDOR_INTEL,
1553 .features[FEAT_1_EDX] =
1554 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1555 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1556 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1557 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1558 CPUID_DE | CPUID_FP87,
1559 .features[FEAT_1_ECX] =
1560 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1561 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1562 .features[FEAT_8000_0001_EDX] =
1563 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1564 .features[FEAT_8000_0001_ECX] =
1566 .xlevel = 0x80000008,
1567 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1570 .name = "Nehalem-IBRS",
1572 .vendor = CPUID_VENDOR_INTEL,
1576 .features[FEAT_1_EDX] =
1577 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1578 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1579 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1580 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1581 CPUID_DE | CPUID_FP87,
1582 .features[FEAT_1_ECX] =
1583 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1584 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1585 .features[FEAT_7_0_EDX] =
1586 CPUID_7_0_EDX_SPEC_CTRL,
1587 .features[FEAT_8000_0001_EDX] =
1588 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1589 .features[FEAT_8000_0001_ECX] =
1591 .xlevel = 0x80000008,
1592 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1597 .vendor = CPUID_VENDOR_INTEL,
1601 .features[FEAT_1_EDX] =
1602 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1603 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1604 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1605 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1606 CPUID_DE | CPUID_FP87,
1607 .features[FEAT_1_ECX] =
1608 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1609 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1610 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1611 .features[FEAT_8000_0001_EDX] =
1612 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1613 .features[FEAT_8000_0001_ECX] =
1615 .features[FEAT_6_EAX] =
1617 .xlevel = 0x80000008,
1618 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1621 .name = "Westmere-IBRS",
1623 .vendor = CPUID_VENDOR_INTEL,
1627 .features[FEAT_1_EDX] =
1628 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1629 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1630 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1631 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1632 CPUID_DE | CPUID_FP87,
1633 .features[FEAT_1_ECX] =
1634 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1635 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1636 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1637 .features[FEAT_8000_0001_EDX] =
1638 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1639 .features[FEAT_8000_0001_ECX] =
1641 .features[FEAT_7_0_EDX] =
1642 CPUID_7_0_EDX_SPEC_CTRL,
1643 .features[FEAT_6_EAX] =
1645 .xlevel = 0x80000008,
1646 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1649 .name = "SandyBridge",
1651 .vendor = CPUID_VENDOR_INTEL,
1655 .features[FEAT_1_EDX] =
1656 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1657 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1658 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1659 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1660 CPUID_DE | CPUID_FP87,
1661 .features[FEAT_1_ECX] =
1662 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1663 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1664 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1665 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1667 .features[FEAT_8000_0001_EDX] =
1668 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1670 .features[FEAT_8000_0001_ECX] =
1672 .features[FEAT_XSAVE] =
1673 CPUID_XSAVE_XSAVEOPT,
1674 .features[FEAT_6_EAX] =
1676 .xlevel = 0x80000008,
1677 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1680 .name = "SandyBridge-IBRS",
1682 .vendor = CPUID_VENDOR_INTEL,
1686 .features[FEAT_1_EDX] =
1687 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1688 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1689 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1690 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1691 CPUID_DE | CPUID_FP87,
1692 .features[FEAT_1_ECX] =
1693 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1694 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1695 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1696 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1698 .features[FEAT_8000_0001_EDX] =
1699 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1701 .features[FEAT_8000_0001_ECX] =
1703 .features[FEAT_7_0_EDX] =
1704 CPUID_7_0_EDX_SPEC_CTRL,
1705 .features[FEAT_XSAVE] =
1706 CPUID_XSAVE_XSAVEOPT,
1707 .features[FEAT_6_EAX] =
1709 .xlevel = 0x80000008,
1710 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1713 .name = "IvyBridge",
1715 .vendor = CPUID_VENDOR_INTEL,
1719 .features[FEAT_1_EDX] =
1720 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1721 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1722 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1723 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1724 CPUID_DE | CPUID_FP87,
1725 .features[FEAT_1_ECX] =
1726 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1727 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1728 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1729 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1730 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1731 .features[FEAT_7_0_EBX] =
1732 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1734 .features[FEAT_8000_0001_EDX] =
1735 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1737 .features[FEAT_8000_0001_ECX] =
1739 .features[FEAT_XSAVE] =
1740 CPUID_XSAVE_XSAVEOPT,
1741 .features[FEAT_6_EAX] =
1743 .xlevel = 0x80000008,
1744 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1747 .name = "IvyBridge-IBRS",
1749 .vendor = CPUID_VENDOR_INTEL,
1753 .features[FEAT_1_EDX] =
1754 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1755 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1756 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1757 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1758 CPUID_DE | CPUID_FP87,
1759 .features[FEAT_1_ECX] =
1760 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1761 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1762 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1763 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1764 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1765 .features[FEAT_7_0_EBX] =
1766 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1768 .features[FEAT_8000_0001_EDX] =
1769 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1771 .features[FEAT_8000_0001_ECX] =
1773 .features[FEAT_7_0_EDX] =
1774 CPUID_7_0_EDX_SPEC_CTRL,
1775 .features[FEAT_XSAVE] =
1776 CPUID_XSAVE_XSAVEOPT,
1777 .features[FEAT_6_EAX] =
1779 .xlevel = 0x80000008,
1780 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1783 .name = "Haswell-noTSX",
1785 .vendor = CPUID_VENDOR_INTEL,
1789 .features[FEAT_1_EDX] =
1790 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1791 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1792 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1793 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1794 CPUID_DE | CPUID_FP87,
1795 .features[FEAT_1_ECX] =
1796 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1797 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1798 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1799 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1800 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1801 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1802 .features[FEAT_8000_0001_EDX] =
1803 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1805 .features[FEAT_8000_0001_ECX] =
1806 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1807 .features[FEAT_7_0_EBX] =
1808 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1809 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1810 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1811 .features[FEAT_XSAVE] =
1812 CPUID_XSAVE_XSAVEOPT,
1813 .features[FEAT_6_EAX] =
1815 .xlevel = 0x80000008,
1816 .model_id = "Intel Core Processor (Haswell, no TSX)",
1819 .name = "Haswell-noTSX-IBRS",
1821 .vendor = CPUID_VENDOR_INTEL,
1825 .features[FEAT_1_EDX] =
1826 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1827 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1828 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1829 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1830 CPUID_DE | CPUID_FP87,
1831 .features[FEAT_1_ECX] =
1832 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1833 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1834 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1835 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1836 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1837 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1838 .features[FEAT_8000_0001_EDX] =
1839 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1841 .features[FEAT_8000_0001_ECX] =
1842 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1843 .features[FEAT_7_0_EDX] =
1844 CPUID_7_0_EDX_SPEC_CTRL,
1845 .features[FEAT_7_0_EBX] =
1846 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1847 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1848 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1849 .features[FEAT_XSAVE] =
1850 CPUID_XSAVE_XSAVEOPT,
1851 .features[FEAT_6_EAX] =
1853 .xlevel = 0x80000008,
1854 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1859 .vendor = CPUID_VENDOR_INTEL,
1863 .features[FEAT_1_EDX] =
1864 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1865 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1866 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1867 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1868 CPUID_DE | CPUID_FP87,
1869 .features[FEAT_1_ECX] =
1870 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1871 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1872 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1873 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1874 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1875 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1876 .features[FEAT_8000_0001_EDX] =
1877 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1879 .features[FEAT_8000_0001_ECX] =
1880 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1881 .features[FEAT_7_0_EBX] =
1882 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1883 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1884 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1886 .features[FEAT_XSAVE] =
1887 CPUID_XSAVE_XSAVEOPT,
1888 .features[FEAT_6_EAX] =
1890 .xlevel = 0x80000008,
1891 .model_id = "Intel Core Processor (Haswell)",
1894 .name = "Haswell-IBRS",
1896 .vendor = CPUID_VENDOR_INTEL,
1900 .features[FEAT_1_EDX] =
1901 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1902 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1903 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1904 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1905 CPUID_DE | CPUID_FP87,
1906 .features[FEAT_1_ECX] =
1907 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1908 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1909 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1910 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1911 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1912 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1913 .features[FEAT_8000_0001_EDX] =
1914 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1916 .features[FEAT_8000_0001_ECX] =
1917 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1918 .features[FEAT_7_0_EDX] =
1919 CPUID_7_0_EDX_SPEC_CTRL,
1920 .features[FEAT_7_0_EBX] =
1921 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1922 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1923 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1925 .features[FEAT_XSAVE] =
1926 CPUID_XSAVE_XSAVEOPT,
1927 .features[FEAT_6_EAX] =
1929 .xlevel = 0x80000008,
1930 .model_id = "Intel Core Processor (Haswell, IBRS)",
1933 .name = "Broadwell-noTSX",
1935 .vendor = CPUID_VENDOR_INTEL,
1939 .features[FEAT_1_EDX] =
1940 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1941 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1942 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1943 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1944 CPUID_DE | CPUID_FP87,
1945 .features[FEAT_1_ECX] =
1946 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1947 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1948 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1949 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1950 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1951 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1952 .features[FEAT_8000_0001_EDX] =
1953 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1955 .features[FEAT_8000_0001_ECX] =
1956 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1957 .features[FEAT_7_0_EBX] =
1958 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1959 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1960 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1961 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1963 .features[FEAT_XSAVE] =
1964 CPUID_XSAVE_XSAVEOPT,
1965 .features[FEAT_6_EAX] =
1967 .xlevel = 0x80000008,
1968 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1971 .name = "Broadwell-noTSX-IBRS",
1973 .vendor = CPUID_VENDOR_INTEL,
1977 .features[FEAT_1_EDX] =
1978 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1979 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1980 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1981 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1982 CPUID_DE | CPUID_FP87,
1983 .features[FEAT_1_ECX] =
1984 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1985 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1986 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1987 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1988 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1989 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1990 .features[FEAT_8000_0001_EDX] =
1991 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1993 .features[FEAT_8000_0001_ECX] =
1994 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1995 .features[FEAT_7_0_EDX] =
1996 CPUID_7_0_EDX_SPEC_CTRL,
1997 .features[FEAT_7_0_EBX] =
1998 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1999 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2000 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2001 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2003 .features[FEAT_XSAVE] =
2004 CPUID_XSAVE_XSAVEOPT,
2005 .features[FEAT_6_EAX] =
2007 .xlevel = 0x80000008,
2008 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
2011 .name = "Broadwell",
2013 .vendor = CPUID_VENDOR_INTEL,
2017 .features[FEAT_1_EDX] =
2018 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2019 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2020 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2021 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2022 CPUID_DE | CPUID_FP87,
2023 .features[FEAT_1_ECX] =
2024 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2025 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2026 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2027 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2028 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2029 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2030 .features[FEAT_8000_0001_EDX] =
2031 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2033 .features[FEAT_8000_0001_ECX] =
2034 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2035 .features[FEAT_7_0_EBX] =
2036 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2037 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2038 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2039 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2041 .features[FEAT_XSAVE] =
2042 CPUID_XSAVE_XSAVEOPT,
2043 .features[FEAT_6_EAX] =
2045 .xlevel = 0x80000008,
2046 .model_id = "Intel Core Processor (Broadwell)",
2049 .name = "Broadwell-IBRS",
2051 .vendor = CPUID_VENDOR_INTEL,
2055 .features[FEAT_1_EDX] =
2056 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2057 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2058 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2059 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2060 CPUID_DE | CPUID_FP87,
2061 .features[FEAT_1_ECX] =
2062 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2063 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2064 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2065 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2066 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2067 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2068 .features[FEAT_8000_0001_EDX] =
2069 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2071 .features[FEAT_8000_0001_ECX] =
2072 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2073 .features[FEAT_7_0_EDX] =
2074 CPUID_7_0_EDX_SPEC_CTRL,
2075 .features[FEAT_7_0_EBX] =
2076 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2077 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2078 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2079 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2081 .features[FEAT_XSAVE] =
2082 CPUID_XSAVE_XSAVEOPT,
2083 .features[FEAT_6_EAX] =
2085 .xlevel = 0x80000008,
2086 .model_id = "Intel Core Processor (Broadwell, IBRS)",
2089 .name = "Skylake-Client",
2091 .vendor = CPUID_VENDOR_INTEL,
2095 .features[FEAT_1_EDX] =
2096 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2097 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2098 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2099 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2100 CPUID_DE | CPUID_FP87,
2101 .features[FEAT_1_ECX] =
2102 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2103 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2104 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2105 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2106 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2107 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2108 .features[FEAT_8000_0001_EDX] =
2109 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2111 .features[FEAT_8000_0001_ECX] =
2112 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2113 .features[FEAT_7_0_EBX] =
2114 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2115 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2116 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2117 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2118 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2119 /* Missing: XSAVES (not supported by some Linux versions,
2120 * including v4.1 to v4.12).
2121 * KVM doesn't yet expose any XSAVES state save component,
2122 * and the only one defined in Skylake (processor tracing)
2123 * probably will block migration anyway.
2125 .features[FEAT_XSAVE] =
2126 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2127 CPUID_XSAVE_XGETBV1,
2128 .features[FEAT_6_EAX] =
2130 .xlevel = 0x80000008,
2131 .model_id = "Intel Core Processor (Skylake)",
2134 .name = "Skylake-Client-IBRS",
2136 .vendor = CPUID_VENDOR_INTEL,
2140 .features[FEAT_1_EDX] =
2141 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2142 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2143 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2144 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2145 CPUID_DE | CPUID_FP87,
2146 .features[FEAT_1_ECX] =
2147 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2148 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2149 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2150 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2151 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2152 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2153 .features[FEAT_8000_0001_EDX] =
2154 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2156 .features[FEAT_8000_0001_ECX] =
2157 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2158 .features[FEAT_7_0_EDX] =
2159 CPUID_7_0_EDX_SPEC_CTRL,
2160 .features[FEAT_7_0_EBX] =
2161 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2162 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2163 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2164 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2165 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2166 /* Missing: XSAVES (not supported by some Linux versions,
2167 * including v4.1 to v4.12).
2168 * KVM doesn't yet expose any XSAVES state save component,
2169 * and the only one defined in Skylake (processor tracing)
2170 * probably will block migration anyway.
2172 .features[FEAT_XSAVE] =
2173 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2174 CPUID_XSAVE_XGETBV1,
2175 .features[FEAT_6_EAX] =
2177 .xlevel = 0x80000008,
2178 .model_id = "Intel Core Processor (Skylake, IBRS)",
2181 .name = "Skylake-Server",
2183 .vendor = CPUID_VENDOR_INTEL,
2187 .features[FEAT_1_EDX] =
2188 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2189 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2190 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2191 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2192 CPUID_DE | CPUID_FP87,
2193 .features[FEAT_1_ECX] =
2194 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2195 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2196 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2197 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2198 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2199 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2200 .features[FEAT_8000_0001_EDX] =
2201 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2202 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2203 .features[FEAT_8000_0001_ECX] =
2204 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2205 .features[FEAT_7_0_EBX] =
2206 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2207 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2208 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2209 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2210 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2211 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2212 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2213 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2214 /* Missing: XSAVES (not supported by some Linux versions,
2215 * including v4.1 to v4.12).
2216 * KVM doesn't yet expose any XSAVES state save component,
2217 * and the only one defined in Skylake (processor tracing)
2218 * probably will block migration anyway.
2220 .features[FEAT_XSAVE] =
2221 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2222 CPUID_XSAVE_XGETBV1,
2223 .features[FEAT_6_EAX] =
2225 .xlevel = 0x80000008,
2226 .model_id = "Intel Xeon Processor (Skylake)",
2229 .name = "Skylake-Server-IBRS",
2231 .vendor = CPUID_VENDOR_INTEL,
2235 .features[FEAT_1_EDX] =
2236 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2237 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2238 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2239 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2240 CPUID_DE | CPUID_FP87,
2241 .features[FEAT_1_ECX] =
2242 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2243 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2244 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2245 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2246 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2247 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2248 .features[FEAT_8000_0001_EDX] =
2249 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2250 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2251 .features[FEAT_8000_0001_ECX] =
2252 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2253 .features[FEAT_7_0_EDX] =
2254 CPUID_7_0_EDX_SPEC_CTRL,
2255 .features[FEAT_7_0_EBX] =
2256 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2257 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2258 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2259 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2260 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2261 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2262 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2263 CPUID_7_0_EBX_AVX512VL,
2264 /* Missing: XSAVES (not supported by some Linux versions,
2265 * including v4.1 to v4.12).
2266 * KVM doesn't yet expose any XSAVES state save component,
2267 * and the only one defined in Skylake (processor tracing)
2268 * probably will block migration anyway.
2270 .features[FEAT_XSAVE] =
2271 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2272 CPUID_XSAVE_XGETBV1,
2273 .features[FEAT_6_EAX] =
2275 .xlevel = 0x80000008,
2276 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2279 .name = "KnightsMill",
2281 .vendor = CPUID_VENDOR_INTEL,
2285 .features[FEAT_1_EDX] =
2286 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2287 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2288 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2289 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2290 CPUID_PSE | CPUID_DE | CPUID_FP87,
2291 .features[FEAT_1_ECX] =
2292 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2293 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2294 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2295 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2296 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2297 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2298 .features[FEAT_8000_0001_EDX] =
2299 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2300 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2301 .features[FEAT_8000_0001_ECX] =
2302 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2303 .features[FEAT_7_0_EBX] =
2304 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2305 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2306 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2307 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2308 CPUID_7_0_EBX_AVX512ER,
2309 .features[FEAT_7_0_ECX] =
2310 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2311 .features[FEAT_7_0_EDX] =
2312 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2313 .features[FEAT_XSAVE] =
2314 CPUID_XSAVE_XSAVEOPT,
2315 .features[FEAT_6_EAX] =
2317 .xlevel = 0x80000008,
2318 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2321 .name = "Opteron_G1",
2323 .vendor = CPUID_VENDOR_AMD,
2327 .features[FEAT_1_EDX] =
2328 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2329 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2330 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2331 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2332 CPUID_DE | CPUID_FP87,
2333 .features[FEAT_1_ECX] =
2335 .features[FEAT_8000_0001_EDX] =
2336 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2337 .xlevel = 0x80000008,
2338 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2341 .name = "Opteron_G2",
2343 .vendor = CPUID_VENDOR_AMD,
2347 .features[FEAT_1_EDX] =
2348 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2349 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2350 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2351 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2352 CPUID_DE | CPUID_FP87,
2353 .features[FEAT_1_ECX] =
2354 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2355 /* Missing: CPUID_EXT2_RDTSCP */
2356 .features[FEAT_8000_0001_EDX] =
2357 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2358 .features[FEAT_8000_0001_ECX] =
2359 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2360 .xlevel = 0x80000008,
2361 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2364 .name = "Opteron_G3",
2366 .vendor = CPUID_VENDOR_AMD,
2370 .features[FEAT_1_EDX] =
2371 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2372 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2373 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2374 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2375 CPUID_DE | CPUID_FP87,
2376 .features[FEAT_1_ECX] =
2377 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2379 /* Missing: CPUID_EXT2_RDTSCP */
2380 .features[FEAT_8000_0001_EDX] =
2381 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2382 .features[FEAT_8000_0001_ECX] =
2383 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2384 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2385 .xlevel = 0x80000008,
2386 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2389 .name = "Opteron_G4",
2391 .vendor = CPUID_VENDOR_AMD,
2395 .features[FEAT_1_EDX] =
2396 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2397 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2398 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2399 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2400 CPUID_DE | CPUID_FP87,
2401 .features[FEAT_1_ECX] =
2402 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2403 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2404 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2406 /* Missing: CPUID_EXT2_RDTSCP */
2407 .features[FEAT_8000_0001_EDX] =
2408 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2410 .features[FEAT_8000_0001_ECX] =
2411 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2412 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2413 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2416 .xlevel = 0x8000001A,
2417 .model_id = "AMD Opteron 62xx class CPU",
2420 .name = "Opteron_G5",
2422 .vendor = CPUID_VENDOR_AMD,
2426 .features[FEAT_1_EDX] =
2427 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2428 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2429 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2430 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2431 CPUID_DE | CPUID_FP87,
2432 .features[FEAT_1_ECX] =
2433 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2434 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2435 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2436 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2437 /* Missing: CPUID_EXT2_RDTSCP */
2438 .features[FEAT_8000_0001_EDX] =
2439 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2441 .features[FEAT_8000_0001_ECX] =
2442 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2443 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2444 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2447 .xlevel = 0x8000001A,
2448 .model_id = "AMD Opteron 63xx class CPU",
2453 .vendor = CPUID_VENDOR_AMD,
2457 .features[FEAT_1_EDX] =
2458 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2459 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2460 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2461 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2462 CPUID_VME | CPUID_FP87,
2463 .features[FEAT_1_ECX] =
2464 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2465 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2466 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2467 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2468 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2469 .features[FEAT_8000_0001_EDX] =
2470 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2471 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2473 .features[FEAT_8000_0001_ECX] =
2474 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2475 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2476 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2477 .features[FEAT_7_0_EBX] =
2478 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2479 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2480 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2481 CPUID_7_0_EBX_SHA_NI,
2482 /* Missing: XSAVES (not supported by some Linux versions,
2483 * including v4.1 to v4.12).
2484 * KVM doesn't yet expose any XSAVES state save component.
2486 .features[FEAT_XSAVE] =
2487 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2488 CPUID_XSAVE_XGETBV1,
2489 .features[FEAT_6_EAX] =
2491 .xlevel = 0x8000000A,
2492 .model_id = "AMD EPYC Processor",
2493 .cache_info = &epyc_cache_info,
2496 .name = "EPYC-IBPB",
2498 .vendor = CPUID_VENDOR_AMD,
2502 .features[FEAT_1_EDX] =
2503 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2504 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2505 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2506 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2507 CPUID_VME | CPUID_FP87,
2508 .features[FEAT_1_ECX] =
2509 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2510 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2511 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2512 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2513 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2514 .features[FEAT_8000_0001_EDX] =
2515 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2516 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2518 .features[FEAT_8000_0001_ECX] =
2519 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2520 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2521 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2522 .features[FEAT_8000_0008_EBX] =
2523 CPUID_8000_0008_EBX_IBPB,
2524 .features[FEAT_7_0_EBX] =
2525 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2526 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2527 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2528 CPUID_7_0_EBX_SHA_NI,
2529 /* Missing: XSAVES (not supported by some Linux versions,
2530 * including v4.1 to v4.12).
2531 * KVM doesn't yet expose any XSAVES state save component.
2533 .features[FEAT_XSAVE] =
2534 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2535 CPUID_XSAVE_XGETBV1,
2536 .features[FEAT_6_EAX] =
2538 .xlevel = 0x8000000A,
2539 .model_id = "AMD EPYC Processor (with IBPB)",
2540 .cache_info = &epyc_cache_info,
/* Name/value pair for a CPU property default: "prop" is the QOM property
 * name, "value" its default setting.  Used by the accelerator-specific
 * default tables below.
 * NOTE(review): the closing "} PropValue;" line was elided from this extract. */
2544 typedef struct PropValue {
2545 const char *prop, *value;
2548 /* KVM-specific features that are automatically added/removed
2549 * from all CPU models when KVM is enabled.
/* NOTE(review): several table entries and the terminating sentinel entry
 * (between original lines 2557 and 2560, and after 2560) were elided from
 * this extract. */
2551 static PropValue kvm_default_props[] = {
2552 { "kvmclock", "on" },
2553 { "kvm-nopiodelay", "on" },
2554 { "kvm-asyncpf", "on" },
2555 { "kvm-steal-time", "on" },
2556 { "kvm-pv-eoi", "on" },
2557 { "kvmclock-stable-bit", "on" },
/* MONITOR/MWAIT is defaulted off under KVM. */
2560 { "monitor", "off" },
2565 /* TCG-specific defaults that override all CPU models when using TCG
/* NOTE(review): the table entries and terminator were elided from this
 * extract. */
2567 static PropValue tcg_default_props[] = {
/*
 * Change the default value of property @prop in kvm_default_props to @value.
 * Scans the table for a matching property name; the statements that store
 * the new value and return (and, per the trailing comment, an assert for
 * unknown names) were elided from this extract.
 */
2573 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2576 for (pv = kvm_default_props; pv->prop; pv++) {
2577 if (!strcmp(pv->prop, prop)) {
2583 /* It is valid to call this function only for properties that
2584 * are already present in the kvm_default_props table.
2589 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2590 bool migratable_only);
/*
 * Ask KVM which MCE capabilities the host supports and report whether
 * local machine-check (MCG_LMCE_P) is among them.
 * NOTE(review): the body of the ioctl-failure branch was elided from this
 * extract — presumably "return false"; confirm against the full source.
 */
2592 static bool lmce_supported(void)
2594 uint64_t mce_cap = 0;
2597 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2602 return !!(mce_cap & MCG_LMCE_P);
2605 #define CPUID_MODEL_ID_SZ 48
2608 * cpu_x86_fill_model_id:
2609 * Get CPUID model ID string from host CPU.
2611 * @str should have at least CPUID_MODEL_ID_SZ bytes
2613 * The function does NOT add a null terminator to the string
2616 static int cpu_x86_fill_model_id(char *str)
2618 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
/* CPUID leaves 0x80000002..0x80000004 each yield 16 bytes of the model
 * string in EAX/EBX/ECX/EDX; copy them out in register order. */
2621 for (i = 0; i < 3; i++) {
2622 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2623 memcpy(str + i * 16 + 0, &eax, 4);
2624 memcpy(str + i * 16 + 4, &ebx, 4);
2625 memcpy(str + i * 16 + 8, &ecx, 4);
2626 memcpy(str + i * 16 + 12, &edx, 4);
/* qdev properties of the "max" CPU model.  "migratable" defaults to true,
 * "host-cache-info" (cache_info_passthrough) to false.
 * NOTE(review): the closing "};" was elided from this extract. */
2631 static Property max_x86_cpu_properties[] = {
2632 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2633 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2634 DEFINE_PROP_END_OF_LIST()
/*
 * Class init for the "max" CPU model: sets the human-readable model
 * description and attaches the qdev property list above.
 * NOTE(review): some statements between original lines 2640 and 2644
 * (e.g. an ordering assignment) were elided from this extract.
 */
2637 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2639 DeviceClass *dc = DEVICE_CLASS(oc);
2640 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2644 xcc->model_description =
2645 "Enables all features supported by the accelerator in the current host";
2647 dc->props = max_x86_cpu_properties;
2650 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
/*
 * Instance init for the "max" CPU model.
 *
 * When the accelerator exposes the host's CPUID (KVM/HVF), the host's
 * vendor, family/model/stepping and model-id are copied into the CPU
 * object via QOM properties, and the minimum CPUID levels are taken from
 * what the accelerator reports.  Otherwise a generic AMD-flavored TCG CPU
 * identity is synthesized.
 *
 * NOTE(review): several lines were elided from this extract, including the
 * "} else if (hvf_enabled()) {" before original line 2693 and the "} else {"
 * (TCG branch) before original line 2705 — inferred from the surviving
 * branch bodies; confirm against the full source.
 */
2652 static void max_x86_cpu_initfn(Object *obj)
2654 X86CPU *cpu = X86_CPU(obj);
2655 CPUX86State *env = &cpu->env;
2656 KVMState *s = kvm_state;
2658 /* We can't fill the features array here because we don't know yet if
2659 * "migratable" is true or false.
2661 cpu->max_features = true;
2663 if (accel_uses_host_cpuid()) {
2664 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2665 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2666 int family, model, stepping;
2667 X86CPUDefinition host_cpudef = { };
2668 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
/* CPUID leaf 0 returns the vendor string in EBX/EDX/ECX order. */
2670 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2671 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2673 host_vendor_fms(vendor, &family, &model, &stepping);
2675 cpu_x86_fill_model_id(model_id);
2677 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2678 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2679 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2680 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2682 object_property_set_str(OBJECT(cpu), model_id, "model-id",
/* Minimum CPUID levels come from the accelerator: basic (0x0),
 * extended (0x80000000) and Centaur (0xC0000000) ranges. */
2685 if (kvm_enabled()) {
2686 env->cpuid_min_level =
2687 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2688 env->cpuid_min_xlevel =
2689 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2690 env->cpuid_min_xlevel2 =
2691 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2693 env->cpuid_min_level =
2694 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2695 env->cpuid_min_xlevel =
2696 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2697 env->cpuid_min_xlevel2 =
2698 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2701 if (lmce_supported()) {
2702 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
/* Non-host-cpuid (TCG) path: generic AMD family 6, model 6, stepping 3. */
2705 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2706 "vendor", &error_abort);
2707 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2708 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2709 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2710 object_property_set_str(OBJECT(cpu),
2711 "QEMU TCG CPU version " QEMU_HW_VERSION,
2712 "model-id", &error_abort);
2715 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
/* QOM registration of the "max" CPU model, derived from TYPE_X86_CPU.
 * NOTE(review): the closing "};" was elided from this extract. */
2718 static const TypeInfo max_x86_cpu_type_info = {
2719 .name = X86_CPU_TYPE_NAME("max"),
2720 .parent = TYPE_X86_CPU,
2721 .instance_init = max_x86_cpu_initfn,
2722 .class_init = max_x86_cpu_class_init,
2725 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
/*
 * Class init for "-cpu host": marks the class as requiring host CPUID
 * (so it is unavailable under pure TCG) and picks an accelerator-specific
 * description string.
 */
2726 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2728 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2730 xcc->host_cpuid_required = true;
2733 if (kvm_enabled()) {
2734 xcc->model_description =
2735 "KVM processor with all supported host features ";
2736 } else if (hvf_enabled()) {
2737 xcc->model_description =
2738 "HVF processor with all supported host features ";
/* QOM registration of the "host" CPU model; it subclasses "max" so it
 * inherits that model's instance init and properties.
 * NOTE(review): the closing "};" was elided from this extract. */
2742 static const TypeInfo host_x86_cpu_type_info = {
2743 .name = X86_CPU_TYPE_NAME("host"),
2744 .parent = X86_CPU_TYPE_NAME("max"),
2745 .class_init = host_x86_cpu_class_init,
/*
 * Emit one warning per bit set in @mask that the current accelerator
 * ("host" for KVM/HVF, otherwise "TCG") does not support, naming the
 * CPUID leaf/register and, when known, the feature name from
 * feature_word_info[w].feat_names.
 */
2750 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2752 FeatureWordInfo *f = &feature_word_info[w];
2755 for (i = 0; i < 32; ++i) {
2756 if ((1UL << i) & mask) {
2757 const char *reg = get_register_name_32(f->cpuid_reg);
2759 warn_report("%s doesn't support requested feature: "
2760 "CPUID.%02XH:%s%s%s [bit %d]",
2761 accel_uses_host_cpuid() ? "host" : "TCG",
2763 f->feat_names[i] ? "." : "",
2764 f->feat_names[i] ? f->feat_names[i] : "", i);
/*
 * QOM getter for the "family" property: base family is CPUID version
 * bits 11:8; the extended family field (bits 27:20) is added on top.
 * NOTE(review): the condition guarding the extended-family add (original
 * line 2778, presumably "if (value == 0xf)") was elided from this extract.
 */
2769 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2770 const char *name, void *opaque,
2773 X86CPU *cpu = X86_CPU(obj);
2774 CPUX86State *env = &cpu->env;
2777 value = (env->cpuid_version >> 8) & 0xf;
2779 value += (env->cpuid_version >> 20) & 0xff;
2781 visit_type_int(v, name, &value, errp);
/*
 * QOM setter for the "family" property.  Accepts 0..0x10e (0xff extended +
 * 0xf base).  Values > 0x0f are encoded as base family 0xf plus the excess
 * in the extended-family field (bits 27:20); smaller values go straight
 * into bits 11:8.
 * NOTE(review): the "if (value > 0x0f) { ... } else" framing around
 * original lines 2808/2810 was elided from this extract.
 */
2784 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2785 const char *name, void *opaque,
2788 X86CPU *cpu = X86_CPU(obj);
2789 CPUX86State *env = &cpu->env;
2790 const int64_t min = 0;
2791 const int64_t max = 0xff + 0xf;
2792 Error *local_err = NULL;
2795 visit_type_int(v, name, &value, &local_err);
2797 error_propagate(errp, local_err);
2800 if (value < min || value > max) {
2801 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2802 name ? name : "null", value, min, max);
/* Clear base family (bits 11:8) and extended family (bits 27:20). */
2806 env->cpuid_version &= ~0xff00f00;
2808 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2810 env->cpuid_version |= value << 8;
/*
 * QOM getter for the "model" property: low nibble from CPUID version
 * bits 7:4, high nibble from the extended-model field (bits 19:16).
 */
2814 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2815 const char *name, void *opaque,
2818 X86CPU *cpu = X86_CPU(obj);
2819 CPUX86State *env = &cpu->env;
2822 value = (env->cpuid_version >> 4) & 0xf;
2823 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2824 visit_type_int(v, name, &value, errp);
/*
 * QOM setter for the "model" property (0..0xff): low nibble goes into
 * CPUID version bits 7:4, high nibble into the extended-model field
 * (bits 19:16).
 */
2827 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2828 const char *name, void *opaque,
2831 X86CPU *cpu = X86_CPU(obj);
2832 CPUX86State *env = &cpu->env;
2833 const int64_t min = 0;
2834 const int64_t max = 0xff;
2835 Error *local_err = NULL;
2838 visit_type_int(v, name, &value, &local_err);
2840 error_propagate(errp, local_err);
2843 if (value < min || value > max) {
2844 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2845 name ? name : "null", value, min, max);
/* Clear model (bits 7:4) and extended model (bits 19:16) before setting. */
2849 env->cpuid_version &= ~0xf00f0;
2850 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
/* QOM getter for the "stepping" property: CPUID version bits 3:0. */
2853 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2854 const char *name, void *opaque,
2857 X86CPU *cpu = X86_CPU(obj);
2858 CPUX86State *env = &cpu->env;
2861 value = env->cpuid_version & 0xf;
2862 visit_type_int(v, name, &value, errp);
/*
 * QOM setter for the "stepping" property (0..0xf): stored in CPUID
 * version bits 3:0.
 */
2865 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2866 const char *name, void *opaque,
2869 X86CPU *cpu = X86_CPU(obj);
2870 CPUX86State *env = &cpu->env;
2871 const int64_t min = 0;
2872 const int64_t max = 0xf;
2873 Error *local_err = NULL;
2876 visit_type_int(v, name, &value, &local_err);
2878 error_propagate(errp, local_err);
2881 if (value < min || value > max) {
2882 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2883 name ? name : "null", value, min, max);
2887 env->cpuid_version &= ~0xf;
2888 env->cpuid_version |= value & 0xf;
/*
 * QOM getter for the "vendor" property: decodes the three 32-bit CPUID
 * vendor words into a freshly g_malloc'd 12-character string (caller
 * frees).  NOTE(review): the "return value;" line was elided from this
 * extract.
 */
2891 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2893 X86CPU *cpu = X86_CPU(obj);
2894 CPUX86State *env = &cpu->env;
2897 value = g_malloc(CPUID_VENDOR_SZ + 1);
2898 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
2899 env->cpuid_vendor3);
/*
 * QOM setter for the "vendor" property.  Requires exactly CPUID_VENDOR_SZ
 * (12) characters; packs them little-endian, 4 bytes per register, into
 * the three CPUID vendor words (EBX/EDX/ECX layout of leaf 0).
 */
2903 static void x86_cpuid_set_vendor(Object *obj, const char *value,
2906 X86CPU *cpu = X86_CPU(obj);
2907 CPUX86State *env = &cpu->env;
2910 if (strlen(value) != CPUID_VENDOR_SZ) {
2911 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
2915 env->cpuid_vendor1 = 0;
2916 env->cpuid_vendor2 = 0;
2917 env->cpuid_vendor3 = 0;
2918 for (i = 0; i < 4; i++) {
2919 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
2920 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
2921 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
/*
 * QOM getter for the "model-id" property: unpacks the 48-byte model-id
 * string from the packed 32-bit cpuid_model[] words into a freshly
 * allocated C string (caller frees).
 */
2925 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
2927 X86CPU *cpu = X86_CPU(obj);
2928 CPUX86State *env = &cpu->env;
2932 value = g_malloc(48 + 1);
2933 for (i = 0; i < 48; i++) {
/* Byte i lives in word i/4, at byte position i%4 (little-endian). */
2934 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
/*
 * QOM setter for the "model-id" property: zero-fills the packed
 * cpuid_model[] words, then packs up to 48 characters of @model_id,
 * little-endian, 4 bytes per word.
 * NOTE(review): lines handling a NULL model_id and padding past the
 * string's end (original 2948, 2953-2955) were elided from this extract.
 */
2940 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
2943 X86CPU *cpu = X86_CPU(obj);
2944 CPUX86State *env = &cpu->env;
2947 if (model_id == NULL) {
2950 len = strlen(model_id);
2951 memset(env->cpuid_model, 0, 48);
2952 for (i = 0; i < 48; i++) {
2956 c = (uint8_t)model_id[i];
2958 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
/* QOM getter for "tsc-frequency": reports env.tsc_khz converted to Hz. */
2962 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
2963 void *opaque, Error **errp)
2965 X86CPU *cpu = X86_CPU(obj);
2968 value = cpu->env.tsc_khz * 1000;
2969 visit_type_int(v, name, &value, errp);
/*
 * QOM setter for "tsc-frequency": accepts a value in Hz, stores it in kHz
 * in both tsc_khz and user_tsc_khz (the latter recording that the user
 * explicitly set it).
 */
2972 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
2973 void *opaque, Error **errp)
2975 X86CPU *cpu = X86_CPU(obj);
2976 const int64_t min = 0;
2977 const int64_t max = INT64_MAX;
2978 Error *local_err = NULL;
2981 visit_type_int(v, name, &value, &local_err);
2983 error_propagate(errp, local_err);
2986 if (value < min || value > max) {
2987 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2988 name ? name : "null", value, min, max);
2992 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
2995 /* Generic getter for "feature-words" and "filtered-features" properties */
/*
 * @opaque points at the uint32_t feature-word array to expose.  Builds a
 * stack-allocated X86CPUFeatureWordInfoList describing each feature word
 * (CPUID leaf, optional ECX input, register, and the word's bits) and
 * hands it to the visitor — no heap allocation needed since the visitor
 * consumes the list before this function returns.
 */
2996 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
2997 const char *name, void *opaque,
3000 uint32_t *array = (uint32_t *)opaque;
3002 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
3003 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
3004 X86CPUFeatureWordInfoList *list = NULL;
3006 for (w = 0; w < FEATURE_WORDS; w++) {
3007 FeatureWordInfo *wi = &feature_word_info[w];
3008 X86CPUFeatureWordInfo *qwi = &word_infos[w];
3009 qwi->cpuid_input_eax = wi->cpuid_eax;
3010 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
3011 qwi->cpuid_input_ecx = wi->cpuid_ecx;
3012 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
3013 qwi->features = array[w];
3015 /* List will be in reverse order, but order shouldn't matter */
3016 list_entries[w].next = list;
3017 list_entries[w].value = &word_infos[w];
3018 list = &list_entries[w];
3021 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
/* QOM getter for the Hyper-V "hv-spinlocks" retry count. */
3024 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3025 void *opaque, Error **errp)
3027 X86CPU *cpu = X86_CPU(obj);
3028 int64_t value = cpu->hyperv_spinlock_attempts;
3030 visit_type_int(v, name, &value, errp);
/*
 * QOM setter for "hv-spinlocks".  The minimum of 0xFFF matches the
 * smallest retry count Hyper-V guests accept for the spinlock
 * enlightenment; the maximum is what fits in the 32-bit MSR field.
 */
3033 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3034 void *opaque, Error **errp)
3036 const int64_t min = 0xFFF;
3037 const int64_t max = UINT_MAX;
3038 X86CPU *cpu = X86_CPU(obj);
3042 visit_type_int(v, name, &value, &err);
3044 error_propagate(errp, err);
3048 if (value < min || value > max) {
3049 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
3050 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
3051 object_get_typename(obj), name ? name : "null",
3055 cpu->hyperv_spinlock_attempts = value;
/* qdev PropertyInfo wiring the hv-spinlocks getter/setter above.
 * NOTE(review): the .name line and closing "};" were elided from this
 * extract. */
3058 static const PropertyInfo qdev_prop_spinlocks = {
3060 .get = x86_get_hv_spinlocks,
3061 .set = x86_set_hv_spinlocks,
3064 /* Convert all '_' in a feature string option name to '-', to make feature
3065 * name conform to QOM property naming rule, which uses '-' instead of '_'.
/* NOTE(review): the loop body replacing the found '_' (presumably
 * "*s++ = '-';") was elided from this extract. */
3067 static inline void feat2prop(char *s)
3069 while ((s = strchr(s, '_'))) {
3074 /* Return the feature property name for a feature flag bit */
3075 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
3077 /* XSAVE components are automatically enabled by other features,
3078 * so return the original feature name instead
3080 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
/* Components are split across two 32-bit words; HI bits start at 32. */
3081 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
3083 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
3084 x86_ext_save_areas[comp].bits) {
/* Redirect to the feature word/bit that enables this save area. */
3085 w = x86_ext_save_areas[comp].feature;
3086 bitnr = ctz32(x86_ext_save_areas[comp].bits);
3091 assert(w < FEATURE_WORDS);
3092 return feature_word_info[w].feat_names[bitnr];
3095 /* Compatibility hack to maintain legacy +-feat semantic,
3096 * where +-feat overwrites any feature set by
3097 * feat=on|feat even if the later is parsed after +-feat
3098 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
/* Lists of g_strdup'd feature names seen with leading '+' / '-'. */
3100 static GList *plus_features, *minus_features;
/* GCompareFunc adapter for g_list_find_custom: NULL-safe strcmp. */
3102 static gint compare_string(gconstpointer a, gconstpointer b)
3104 return g_strcmp0(a, b);
3107 /* Parse "+feature,-feature,feature=foo" CPU feature string
/*
 * Splits @features on ',' (destructively, via strtok) and registers each
 * "feature=value" as a qdev global property on @typename.  Legacy
 * "+feat"/"-feat" entries are recorded in plus_features/minus_features
 * and applied elsewhere; mixing both syntaxes for one feature is flagged
 * as ambiguous.  Runs only once per process (cpu_globals_initialized).
 * NOTE(review): several lines are elided from this extract (the
 * "feature=" / bare-name value handling around original 3146-3152, and
 * some control-flow closers); comments below describe only the visible
 * logic.
 */
3109 static void x86_cpu_parse_featurestr(const char *typename, char *features,
3112 char *featurestr; /* Single 'key=value" string being parsed */
3113 static bool cpu_globals_initialized;
3114 bool ambiguous = false;
3116 if (cpu_globals_initialized) {
3119 cpu_globals_initialized = true;
3125 for (featurestr = strtok(features, ",");
3127 featurestr = strtok(NULL, ",")) {
3129 const char *val = NULL;
3132 GlobalProperty *prop;
3134 /* Compatibility syntax: */
3135 if (featurestr[0] == '+') {
3136 plus_features = g_list_append(plus_features,
3137 g_strdup(featurestr + 1));
3139 } else if (featurestr[0] == '-') {
3140 minus_features = g_list_append(minus_features,
3141 g_strdup(featurestr + 1));
3145 eq = strchr(featurestr, '=');
/* Normalize '_' to '-' so names match QOM property naming. */
3153 feat2prop(featurestr);
3156 if (g_list_find_custom(plus_features, name, compare_string)) {
3157 warn_report("Ambiguous CPU model string. "
3158 "Don't mix both \"+%s\" and \"%s=%s\"",
3162 if (g_list_find_custom(minus_features, name, compare_string)) {
3163 warn_report("Ambiguous CPU model string. "
3164 "Don't mix both \"-%s\" and \"%s=%s\"",
/* Special case: "tsc-freq" takes a size-style value (e.g. "2.5G")
 * and is registered as the "tsc-frequency" property in Hz. */
3170 if (!strcmp(name, "tsc-freq")) {
3174 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
3175 if (ret < 0 || tsc_freq > INT64_MAX) {
3176 error_setg(errp, "bad numerical value %s", val);
3179 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
3181 name = "tsc-frequency";
3184 prop = g_new0(typeof(*prop), 1);
3185 prop->driver = typename;
3186 prop->property = g_strdup(name);
3187 prop->value = g_strdup(val);
3188 prop->errp = &error_fatal;
3189 qdev_prop_register_global(prop);
3193 warn_report("Compatibility of ambiguous CPU model "
3194 "strings won't be kept on future QEMU versions");
3198 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3199 static int x86_cpu_filter_features(X86CPU *cpu);
3201 /* Check for missing features that may prevent the CPU class from
3202 * running using the current machine and accelerator.
/*
 * Appends the names of unavailable features to @missing_feats.  A class
 * requiring host CPUID without KVM/HVF reports the single entry "kvm".
 * Otherwise a throwaway instance is created, its features expanded and
 * filtered, and every filtered-out bit is reported by name.
 * NOTE(review): a few list-append closers and an early return were elided
 * from this extract.
 */
3204 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3205 strList **missing_feats)
3210 strList **next = missing_feats;
3212 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3213 strList *new = g_new0(strList, 1);
3214 new->value = g_strdup("kvm");
3215 *missing_feats = new;
3219 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3221 x86_cpu_expand_features(xc, &err);
3223 /* Errors at x86_cpu_expand_features should never happen,
3224 * but in case it does, just report the model as not
3225 * runnable at all using the "type" property.
3227 strList *new = g_new0(strList, 1);
3228 new->value = g_strdup("type");
3233 x86_cpu_filter_features(xc);
3235 for (w = 0; w < FEATURE_WORDS; w++) {
3236 uint32_t filtered = xc->filtered_features[w];
3238 for (i = 0; i < 32; i++) {
3239 if (filtered & (1UL << i)) {
3240 strList *new = g_new0(strList, 1);
3241 new->value = g_strdup(x86_cpu_feature_name(w, i));
3248 object_unref(OBJECT(xc));
3251 /* Print all cpuid feature names in featureset
/* Space-separates entries; NULL slots in @featureset are skipped.
 * NOTE(review): the "first" flag update after printing was elided from
 * this extract. */
3253 static void listflags(FILE *f, fprintf_function print, const char **featureset)
3258 for (bit = 0; bit < 32; bit++) {
3259 if (featureset[bit]) {
3260 print(f, "%s%s", first ? "" : " ", featureset[bit]);
3266 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
/* GCompareFunc for sorting CPU classes: primary key is the explicit
 * ordering field, tie-broken by QOM type name. */
3267 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3269 ObjectClass *class_a = (ObjectClass *)a;
3270 ObjectClass *class_b = (ObjectClass *)b;
3271 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3272 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3273 const char *name_a, *name_b;
3275 if (cc_a->ordering != cc_b->ordering) {
3276 return cc_a->ordering - cc_b->ordering;
3278 name_a = object_class_get_name(class_a);
3279 name_b = object_class_get_name(class_b);
3280 return strcmp(name_a, name_b);
/* Returns a newly-allocated GSList of all TYPE_X86_CPU classes, sorted with
 * x86_cpu_list_compare(). Caller is responsible for freeing the list. */
3284 static GSList *get_sorted_cpu_model_list(void)
3286 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3287 list = g_slist_sort(list, x86_cpu_list_compare);
/* g_slist_foreach() callback: prints one CPU model line ("x86 <name> <desc>")
 * to the CPUListState passed via user_data. */
3291 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3293 ObjectClass *oc = data;
3294 X86CPUClass *cc = X86_CPU_CLASS(oc);
3295 CPUListState *s = user_data;
3296 char *name = x86_cpu_class_get_model_name(cc);
3297 const char *desc = cc->model_description;
/* Fall back to the CPUID model-id string when no explicit description. */
3298 if (!desc && cc->cpu_def) {
3299 desc = cc->cpu_def->model_id;
3302 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
3307 /* list available CPU models and flags */
/* Implements "-cpu help": prints the sorted model list, then every known
 * CPUID flag name, one feature word per line. */
3308 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3313 .cpu_fprintf = cpu_fprintf,
3317 (*cpu_fprintf)(f, "Available CPUs:\n");
3318 list = get_sorted_cpu_model_list();
3319 g_slist_foreach(list, x86_cpu_list_entry, &s)
3322 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
3323 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3324 FeatureWordInfo *fw = &feature_word_info[i];
3326 (*cpu_fprintf)(f, " ");
3327 listflags(f, cpu_fprintf, fw->feat_names);
3328 (*cpu_fprintf)(f, "\n");
/* g_slist_foreach() callback: builds one CpuDefinitionInfo for the class and
 * prepends it to the CpuDefinitionInfoList passed via user_data. */
3332 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3334 ObjectClass *oc = data;
3335 X86CPUClass *cc = X86_CPU_CLASS(oc);
3336 CpuDefinitionInfoList **cpu_list = user_data;
3337 CpuDefinitionInfoList *entry;
3338 CpuDefinitionInfo *info;
3340 info = g_malloc0(sizeof(*info));
3341 info->name = x86_cpu_class_get_model_name(cc);
/* Fills info->unavailable_features with what the host can't provide. */
3342 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3343 info->has_unavailable_features = true;
3344 info->q_typename = g_strdup(object_class_get_name(oc));
3345 info->migration_safe = cc->migration_safe;
3346 info->has_migration_safe = true;
3347 info->q_static = cc->static_model;
/* Prepend to the list (order was already fixed by the sorted input). */
3349 entry = g_malloc0(sizeof(*entry));
3350 entry->value = info;
3351 entry->next = *cpu_list;
/* QMP query-cpu-definitions: returns a CpuDefinitionInfoList describing every
 * registered x86 CPU model. */
3355 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
3357 CpuDefinitionInfoList *cpu_list = NULL;
3358 GSList *list = get_sorted_cpu_model_list();
3359 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
/* Returns the set of bits in feature word @w that the current accelerator
 * (KVM, HVF or TCG) can supply; optionally restricted to migratable flags. */
3364 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3365 bool migratable_only)
3367 FeatureWordInfo *wi = &feature_word_info[w];
3370 if (kvm_enabled()) {
3371 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
3374 } else if (hvf_enabled()) {
3375 r = hvf_get_supported_cpuid(wi->cpuid_eax,
3378 } else if (tcg_enabled()) {
3379 r = wi->tcg_features;
3383 if (migratable_only) {
3384 r &= x86_cpu_get_migratable_flags(w);
/* Warns (via report_unavailable_features) about every feature bit that was
 * removed from the CPU by host-capability filtering. */
3389 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3393 for (w = 0; w < FEATURE_WORDS; w++) {
3394 report_unavailable_features(w, cpu->filtered_features[w]);
/* Applies a NULL-terminated PropValue table to @cpu by parsing each value
 * into the named QOM property. */
3398 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3401 for (pv = props; pv->prop; pv++) {
3405 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3410 /* Load data from X86CPUDefinition into a X86CPU object
/* Copies a CPU model table entry into a freshly-created X86CPU instance via
 * QOM properties, then applies accelerator-specific defaults. */
3412 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
3414 CPUX86State *env = &cpu->env;
3416 char host_vendor[CPUID_VENDOR_SZ + 1];
3419 /*NOTE: any property set by this function should be returned by
3420 * x86_cpu_static_props(), so static expansion of
3421 * query-cpu-model-expansion is always complete.
3424 /* CPU models only set _minimum_ values for level/xlevel: */
3425 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
3426 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
3428 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
3429 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
3430 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
3431 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
3432 for (w = 0; w < FEATURE_WORDS; w++) {
3433 env->features[w] = def->features[w];
3436 /* legacy-cache defaults to 'off' if CPU model provides cache info */
3437 cpu->legacy_cache = !def->cache_info;
3439 /* Special cases not set in the X86CPUDefinition structs: */
3440 /* TODO: in-kernel irqchip for hvf */
3441 if (kvm_enabled()) {
/* Without an in-kernel irqchip KVM can't provide x2apic; turn it off. */
3442 if (!kvm_irqchip_in_kernel()) {
3443 x86_cpu_change_kvm_default("x2apic", "off");
3446 x86_cpu_apply_props(cpu, kvm_default_props);
3447 } else if (tcg_enabled()) {
3448 x86_cpu_apply_props(cpu, tcg_default_props);
/* Always advertise that the guest runs under a hypervisor. */
3451 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
3453 /* sysenter isn't supported in compatibility mode on AMD,
3454 * syscall isn't supported in compatibility mode on Intel.
3455 * Normally we advertise the actual CPU vendor, but you can
3456 * override this using the 'vendor' property if you want to use
3457 * KVM's sysenter/syscall emulation in compatibility mode and
3458 * when doing cross vendor migration
3460 vendor = def->vendor;
3461 if (accel_uses_host_cpuid()) {
3462 uint32_t ebx = 0, ecx = 0, edx = 0;
3463 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
3464 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
3465 vendor = host_vendor;
3468 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
3472 /* Return a QDict containing keys for all properties that can be included
3473 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3474 * must be included in the dictionary.
/* The dict is built lazily: fixed property names plus every feature-flag
 * name from feature_word_info; values are null placeholders. */
3476 static QDict *x86_cpu_static_props(void)
3480 static const char *props[] = {
3498 for (i = 0; props[i]; i++) {
3499 qdict_put_null(d, props[i]);
3502 for (w = 0; w < FEATURE_WORDS; w++) {
3503 FeatureWordInfo *fi = &feature_word_info[w];
3505 for (bit = 0; bit < 32; bit++) {
/* Unnamed bits have no user-visible property; skip them. */
3506 if (!fi->feat_names[bit]) {
3509 qdict_put_null(d, fi->feat_names[bit]);
3516 /* Add an entry to @props dict, with the value for property. */
3517 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3519 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
/* qdict_put_obj takes ownership of the returned QObject reference. */
3522 qdict_put_obj(props, prop, value);
3525 /* Convert CPU model data from X86CPU object to a property dictionary
3526 * that can recreate exactly the same CPU model.
/* "Static" expansion: dumps only the properties listed by
 * x86_cpu_static_props() into @props. */
3528 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3530 QDict *sprops = x86_cpu_static_props();
3531 const QDictEntry *e;
3533 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3534 const char *prop = qdict_entry_key(e);
3535 x86_cpu_expand_prop(cpu, props, prop);
3539 /* Convert CPU model data from X86CPU object to a property dictionary
3540 * that can recreate exactly the same CPU model, including every
3541 * writeable QOM property.
3543 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3545 ObjectPropertyIterator iter;
3546 ObjectProperty *prop;
3548 object_property_iter_init(&iter, OBJECT(cpu));
3549 while ((prop = object_property_iter_next(&iter))) {
3550 /* skip read-only or write-only properties */
3551 if (!prop->get || !prop->set) {
3555 /* "hotplugged" is the only property that is configurable
3556 * on the command-line but will be set differently on CPUs
3557 * created using "-cpu ... -smp ..." and by CPUs created
3558 * on the fly by x86_cpu_from_model() for querying. Skip it.
3560 if (!strcmp(prop->name, "hotplugged")) {
3563 x86_cpu_expand_prop(cpu, props, prop->name);
/* Sets every key/value pair of @props as a QOM property on @obj; stops
 * propagating at most one error to @errp. */
3567 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3569 const QDictEntry *prop;
3572 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3573 object_property_set_qobject(obj, qdict_entry_value(prop),
3574 qdict_entry_key(prop), &err);
3580 error_propagate(errp, err);
3583 /* Create X86CPU object according to model+props specification */
/* Looks up the model class, instantiates it, applies @props and expands
 * features. Returns NULL (with @errp set) on any failure; the caller owns
 * the returned reference. */
3584 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3590 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3592 error_setg(&err, "CPU model '%s' not found", model);
3596 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3598 object_apply_props(OBJECT(xc), props, &err);
3604 x86_cpu_expand_features(xc, &err);
/* Error path: drop the partially-configured CPU object. */
3611 error_propagate(errp, err);
3612 object_unref(OBJECT(xc));
/* QMP query-cpu-model-expansion: expands @model (name + optional props) into
 * a full property dictionary. STATIC expansion reports only the stable
 * "static" properties; FULL expansion also dumps every writable QOM property.
 * Returns NULL with @errp set on failure.
 *
 * Fix: the user-visible error message for an unknown expansion type was
 * misspelled ("Unsupportted"); corrected to "Unsupported". */
3618 CpuModelExpansionInfo *
3619 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3620 CpuModelInfo *model,
3625 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3626 QDict *props = NULL;
3627 const char *base_name;
3629 xc = x86_cpu_from_model(model->name,
3631 qobject_to(QDict, model->props) :
3637 props = qdict_new();
3640 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3641 /* Static expansion will be based on "base" only */
3643 x86_cpu_to_dict(xc, props);
3645 case CPU_MODEL_EXPANSION_TYPE_FULL:
3646 /* As we don't return every single property, full expansion needs
3647 * to keep the original model name+props, and add extra
3648 * properties on top of that.
3650 base_name = model->name;
3651 x86_cpu_to_dict_full(xc, props);
3654 error_setg(&err, "Unsupported expansion type");
3659 props = qdict_new();
3661 x86_cpu_to_dict(xc, props);
3663 ret->model = g_new0(CpuModelInfo, 1);
3664 ret->model->name = g_strdup(base_name);
3665 ret->model->props = QOBJECT(props);
3666 ret->model->has_props = true;
/* Cleanup: the temporary CPU object is always released; on error the
 * partially-built result is freed and NULL is returned. */
3669 object_unref(OBJECT(xc));
3671 error_propagate(errp, err);
3672 qapi_free_CpuModelExpansionInfo(ret);
/* Returns the architecture name the GDB stub reports for this target:
 * "i386:x86-64" on 64-bit builds, plain "i386" otherwise. Caller frees. */
3678 static gchar *x86_gdb_arch_name(CPUState *cs)
3680 #ifdef TARGET_X86_64
3681 return g_strdup("i386:x86-64");
3683 return g_strdup("i386");
/* class_init for generated per-model CPU types: stores the model table entry
 * (passed as class data) and marks the model migration-safe. */
3687 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3689 X86CPUDefinition *cpudef = data;
3690 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3692 xcc->cpu_def = cpudef;
3693 xcc->migration_safe = true;
/* Registers one QOM type for a CPU model table entry, after sanity-checking
 * the definition. */
3696 static void x86_register_cpudef_type(X86CPUDefinition *def)
3698 char *typename = x86_cpu_type_name(def->name);
3701 .parent = TYPE_X86_CPU,
3702 .class_init = x86_cpu_cpudef_class_init,
3706 /* AMD aliases are handled at runtime based on CPUID vendor, so
3707 * they shouldn't be set on the CPU model table.
3709 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3710 /* catch mistakes instead of silently truncating model_id when too long */
3711 assert(def->model_id && strlen(def->model_id) <= 48);
#if !defined(CONFIG_USER_ONLY)
/* Removes the APIC bit from CPUID[1].EDX; used by boards without an APIC. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
#endif /* !CONFIG_USER_ONLY */
/* Computes the guest-visible CPUID result for leaf @index / sub-leaf @count
 * and stores it in *eax..*edx. This is the single source of truth for what
 * the guest sees in CPUID, combining configured feature words, topology,
 * cache models and accelerator capabilities.
 * NOTE(review): this listing elides many lines (case labels, braces); the
 * leaf annotations below are inferred from the surviving comments/values. */
3727 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3728 uint32_t *eax, uint32_t *ebx,
3729 uint32_t *ecx, uint32_t *edx)
3731 X86CPU *cpu = x86_env_get_cpu(env);
3732 CPUState *cs = CPU(cpu);
3733 uint32_t pkg_offset;
3735 uint32_t signature[3];
3737 /* Calculate & apply limits for different index ranges */
3738 if (index >= 0xC0000000) {
3739 limit = env->cpuid_xlevel2;
3740 } else if (index >= 0x80000000) {
3741 limit = env->cpuid_xlevel;
3742 } else if (index >= 0x40000000) {
3745 limit = env->cpuid_level;
3748 if (index > limit) {
3749 /* Intel documentation states that invalid EAX input will
3750 * return the same information as EAX=cpuid_level
3751 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3753 index = env->cpuid_level;
/* Leaf 0: maximum basic leaf + vendor string (GenuineIntel order EBX/EDX/ECX). */
3758 *eax = env->cpuid_level;
3759 *ebx = env->cpuid_vendor1;
3760 *edx = env->cpuid_vendor2;
3761 *ecx = env->cpuid_vendor3;
/* Leaf 1: version/feature information. */
3764 *eax = env->cpuid_version;
3765 *ebx = (cpu->apic_id << 24) |
3766 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3767 *ecx = env->features[FEAT_1_ECX];
/* OSXSAVE mirrors CR4.OSXSAVE at read time, it is not a static flag. */
3768 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3769 *ecx |= CPUID_EXT_OSXSAVE;
3771 *edx = env->features[FEAT_1_EDX];
3772 if (cs->nr_cores * cs->nr_threads > 1) {
3773 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3778 /* cache info: needed for Pentium Pro compatibility */
3779 if (cpu->cache_info_passthrough) {
3780 host_cpuid(index, 0, eax, ebx, ecx, edx);
3783 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3785 if (!cpu->enable_l3_cache) {
3788 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
3790 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
3791 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
3792 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
3795 /* cache info: needed for Core compatibility */
3796 if (cpu->cache_info_passthrough) {
3797 host_cpuid(index, count, eax, ebx, ecx, edx);
3798 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3799 *eax &= ~0xFC000000;
3800 if ((*eax & 31) && cs->nr_cores > 1) {
3801 *eax |= (cs->nr_cores - 1) << 26;
3806 case 0: /* L1 dcache info */
3807 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
3809 eax, ebx, ecx, edx);
3811 case 1: /* L1 icache info */
3812 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
3814 eax, ebx, ecx, edx);
3816 case 2: /* L2 cache info */
3817 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
3818 cs->nr_threads, cs->nr_cores,
3819 eax, ebx, ecx, edx);
3821 case 3: /* L3 cache info */
3822 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3823 if (cpu->enable_l3_cache) {
3824 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
3825 (1 << pkg_offset), cs->nr_cores,
3826 eax, ebx, ecx, edx);
3830 default: /* end of info */
3831 *eax = *ebx = *ecx = *edx = 0;
3837 /* mwait info: needed for Core compatibility */
3838 *eax = 0; /* Smallest monitor-line size in bytes */
3839 *ebx = 0; /* Largest monitor-line size in bytes */
3840 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
3844 /* Thermal and Power Leaf */
3845 *eax = env->features[FEAT_6_EAX];
3851 /* Structured Extended Feature Flags Enumeration Leaf */
3853 *eax = 0; /* Maximum ECX value for sub-leaves */
3854 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3855 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
/* OSPKE mirrors CR4.PKE at read time, like OSXSAVE above. */
3856 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3857 *ecx |= CPUID_7_0_ECX_OSPKE;
3859 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3868 /* Direct Cache Access Information Leaf */
3869 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3875 /* Architectural Performance Monitoring Leaf */
3876 if (kvm_enabled() && cpu->enable_pmu) {
3877 KVMState *s = cs->kvm_state;
3879 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
3880 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
3881 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
3882 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
3883 } else if (hvf_enabled() && cpu->enable_pmu) {
3884 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
3885 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
3886 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
3887 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
3896 /* Extended Topology Enumeration Leaf */
3897 if (!cpu->enable_cpuid_0xb) {
3898 *eax = *ebx = *ecx = *edx = 0;
3902 *ecx = count & 0xff;
3903 *edx = cpu->apic_id;
/* Sub-leaf 0 describes the SMT level, sub-leaf 1 the core level. */
3907 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
3908 *ebx = cs->nr_threads;
3909 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
3912 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3913 *ebx = cs->nr_cores * cs->nr_threads;
3914 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
3919 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
3922 assert(!(*eax & ~0x1f));
3923 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
3926 /* Processor Extended State */
3931 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
/* Sub-leaf 0: enabled XSAVE components and total save-area size. */
3936 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
3937 *eax = env->features[FEAT_XSAVE_COMP_LO];
3938 *edx = env->features[FEAT_XSAVE_COMP_HI];
3940 } else if (count == 1) {
3941 *eax = env->features[FEAT_XSAVE];
3942 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
3943 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
3944 const ExtSaveArea *esa = &x86_ext_save_areas[count];
3952 /* Intel Processor Trace Enumeration */
3957 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
3963 *eax = INTEL_PT_MAX_SUBLEAF;
3964 *ebx = INTEL_PT_MINIMAL_EBX;
3965 *ecx = INTEL_PT_MINIMAL_ECX;
3966 } else if (count == 1) {
3967 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
3968 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
3974 * CPUID code in kvm_arch_init_vcpu() ignores stuff
3975 * set here, but we restrict to TCG nonetheless.
3977 if (tcg_enabled() && cpu->expose_tcg) {
3978 memcpy(signature, "TCGTCGTCGTCG", 12);
3980 *ebx = signature[0];
3981 *ecx = signature[1];
3982 *edx = signature[2];
/* Leaf 0x80000000: maximum extended leaf + vendor string. */
3997 *eax = env->cpuid_xlevel;
3998 *ebx = env->cpuid_vendor1;
3999 *edx = env->cpuid_vendor2;
4000 *ecx = env->cpuid_vendor3;
4003 *eax = env->cpuid_version;
4005 *ecx = env->features[FEAT_8000_0001_ECX];
4006 *edx = env->features[FEAT_8000_0001_EDX];
4008 /* The Linux kernel checks for the CMPLegacy bit and
4009 * discards multiple thread information if it is set.
4010 * So don't set it here for Intel to make Linux guests happy.
4012 if (cs->nr_cores * cs->nr_threads > 1) {
4013 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
4014 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
4015 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
4016 *ecx |= 1 << 1; /* CmpLegacy bit */
/* Leaves 0x80000002..4: 48-byte processor brand string, 16 bytes per leaf. */
4023 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
4024 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
4025 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
4026 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
4029 /* cache info (L1 cache) */
4030 if (cpu->cache_info_passthrough) {
4031 host_cpuid(index, 0, eax, ebx, ecx, edx);
4034 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
4035 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
4036 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
4037 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
4038 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
4039 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
4042 /* cache info (L2 cache) */
4043 if (cpu->cache_info_passthrough) {
4044 host_cpuid(index, 0, eax, ebx, ecx, edx);
4047 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
4048 (L2_DTLB_2M_ENTRIES << 16) | \
4049 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
4050 (L2_ITLB_2M_ENTRIES);
4051 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
4052 (L2_DTLB_4K_ENTRIES << 16) | \
4053 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
4054 (L2_ITLB_4K_ENTRIES);
4055 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
4056 cpu->enable_l3_cache ?
4057 env->cache_info_amd.l3_cache : NULL,
4064 *edx = env->features[FEAT_8000_0007_EDX];
4067 /* virtual & phys address size in low 2 bytes. */
4068 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4069 /* 64 bit processor */
4070 *eax = cpu->phys_bits; /* configurable physical bits */
4071 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
4072 *eax |= 0x00003900; /* 57 bits virtual */
4074 *eax |= 0x00003000; /* 48 bits virtual */
4077 *eax = cpu->phys_bits;
4079 *ebx = env->features[FEAT_8000_0008_EBX];
4082 if (cs->nr_cores * cs->nr_threads > 1) {
4083 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
/* Leaf 0x8000000A: SVM (AMD virtualization) capabilities. */
4087 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4088 *eax = 0x00000001; /* SVM Revision */
4089 *ebx = 0x00000010; /* nr of ASIDs */
4091 *edx = env->features[FEAT_SVM]; /* optional features */
4102 case 0: /* L1 dcache info */
4103 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
4104 eax, ebx, ecx, edx);
4106 case 1: /* L1 icache info */
4107 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
4108 eax, ebx, ecx, edx);
4110 case 2: /* L2 cache info */
4111 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
4112 eax, ebx, ecx, edx);
4114 case 3: /* L3 cache info */
4115 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
4116 eax, ebx, ecx, edx);
4118 default: /* end of info */
4119 *eax = *ebx = *ecx = *edx = 0;
/* Leaf 0xC0000000: maximum Centaur/VIA leaf. */
4124 *eax = env->cpuid_xlevel2;
4130 /* Support for VIA CPU's CPUID instruction */
4131 *eax = env->cpuid_version;
4134 *edx = env->features[FEAT_C000_0001_EDX];
4139 /* Reserved for the future, and now filled with zero */
/* Leaf 0x8000001F: AMD SEV status/configuration. */
4146 *eax = sev_enabled() ? 0x2 : 0;
4147 *ebx = sev_get_cbit_position();
4148 *ebx |= sev_get_reduced_phys_bits() << 6;
4153 /* reserved values: zero */
4162 /* CPUClass::reset() */
/* Resets the CPU to the architectural power-on state: real mode at
 * F000:FFF0, flat 64KiB segments, FPU/SSE/MTRR/debug state cleared, then
 * hands off to the accelerator-specific reset hook. */
4163 static void x86_cpu_reset(CPUState *s)
4165 X86CPU *cpu = X86_CPU(s);
4166 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
4167 CPUX86State *env = &cpu->env;
4172 xcc->parent_reset(s);
/* Zero everything up to (not including) the fields that survive reset. */
4174 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
4176 env->old_exception = -1;
4178 /* init to reset state */
4180 env->hflags2 |= HF2_GIF_MASK;
4182 cpu_x86_update_cr0(env, 0x60000010);
4183 env->a20_mask = ~0x0;
4184 env->smbase = 0x30000;
4185 env->msr_smi_count = 0;
4187 env->idt.limit = 0xffff;
4188 env->gdt.limit = 0xffff;
4189 env->ldt.limit = 0xffff;
4190 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
4191 env->tr.limit = 0xffff;
4192 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
/* CS base 0xffff0000 + IP 0xfff0 = the architectural reset vector. */
4194 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
4195 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
4196 DESC_R_MASK | DESC_A_MASK);
4197 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
4198 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4200 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
4201 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4203 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
4204 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4206 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
4207 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4209 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
4210 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
/* EDX holds family/model/stepping after reset, as real hardware does. */
4214 env->regs[R_EDX] = env->cpuid_version;
4219 for (i = 0; i < 8; i++) {
4222 cpu_set_fpuc(env, 0x37f);
4224 env->mxcsr = 0x1f80;
4225 /* All units are in INIT state. */
4228 env->pat = 0x0007040600070406ULL;
4229 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
4231 memset(env->dr, 0, sizeof(env->dr));
4232 env->dr[6] = DR6_FIXED_1;
4233 env->dr[7] = DR7_FIXED_1;
4234 cpu_breakpoint_remove_all(s, BP_CPU);
4235 cpu_watchpoint_remove_all(s, BP_CPU);
4238 xcr0 = XSTATE_FP_MASK;
4240 #ifdef CONFIG_USER_ONLY
4241 /* Enable all the features for user-mode. */
4242 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4243 xcr0 |= XSTATE_SSE_MASK;
4245 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4246 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4247 if (env->features[esa->feature] & esa->bits) {
4252 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
4253 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
4255 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
4256 cr4 |= CR4_FSGSBASE_MASK;
4261 cpu_x86_update_cr4(env, cr4);
4264 * SDM 11.11.5 requires:
4265 * - IA32_MTRR_DEF_TYPE MSR.E = 0
4266 * - IA32_MTRR_PHYSMASKn.V = 0
4267 * All other bits are undefined. For simplification, zero it all.
4269 env->mtrr_deftype = 0;
4270 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
4271 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
4273 env->interrupt_injected = -1;
4274 env->exception_injected = -1;
4275 env->nmi_injected = false;
4276 #if !defined(CONFIG_USER_ONLY)
4277 /* We hard-wire the BSP to the first CPU. */
4278 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
4280 s->halted = !cpu_is_bsp(cpu);
4282 if (kvm_enabled()) {
4283 kvm_arch_reset_vcpu(cpu);
4285 else if (hvf_enabled()) {
#ifndef CONFIG_USER_ONLY
/* True if this CPU is the bootstrap processor, as reported by the BSP bit
 * of its IA32_APIC_BASE MSR. */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
4297 /* TODO: remove me, when reset over QOM tree is implemented */
/* Machine-reset callback: simply resets the CPU registered as @opaque. */
4298 static void x86_cpu_machine_reset_cb(void *opaque)
4300 X86CPU *cpu = opaque;
4301 cpu_reset(CPU(cpu));
/* Initializes Machine Check Exception state (MCG capability/control and all
 * bank control registers) if the CPU is family >= 6 and advertises MCE+MCA. */
4305 static void mce_init(X86CPU *cpu)
4307 CPUX86State *cenv = &cpu->env;
4310 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4311 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4312 (CPUID_MCE | CPUID_MCA)) {
4313 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4314 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4315 cenv->mcg_ctl = ~(uint64_t)0;
/* Banks are laid out as 4 MSRs each; index 0 of each group is MCi_CTL. */
4316 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4317 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
#ifndef CONFIG_USER_ONLY
/* Selects the APIC implementation class matching the accelerator:
 * "kvm-apic" with an in-kernel KVM irqchip, "xen-apic" under Xen,
 * otherwise the emulated "apic". */
APICCommonClass *apic_get_class(void)
{
    const char *apic_type = "apic";

    /* TODO: in-kernel irqchip for hvf */
    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    return APIC_COMMON_CLASS(object_class_by_name(apic_type));
}
/* Creates the per-CPU local APIC device as a QOM child ("lapic") of the CPU
 * and programs its id and base address. Does not realize it yet. */
4337 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
4339 APICCommonState *apic;
4340 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
4342 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
4344 object_property_add_child(OBJECT(cpu), "lapic",
4345 OBJECT(cpu->apic_state), &error_abort);
/* The child property now holds the reference; drop ours. */
4346 object_unref(OBJECT(cpu->apic_state));
4348 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
4349 /* TODO: convert to link<> */
4350 apic = APIC_COMMON(cpu->apic_state);
4352 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
/* Realizes the CPU's APIC device and, once per machine, maps the shared
 * APIC MMIO region at the default base address. No-op without an APIC. */
4355 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4357 APICCommonState *apic;
4358 static bool apic_mmio_map_once;
4360 if (cpu->apic_state == NULL) {
4363 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
4366 /* Map APIC MMIO area */
4367 apic = APIC_COMMON(cpu->apic_state);
/* The MMIO window is shared by all CPUs; map it only on the first call. */
4368 if (!apic_mmio_map_once) {
4369 memory_region_add_subregion_overlap(get_system_memory(),
4371 MSR_IA32_APICBASE_BASE,
4374 apic_mmio_map_once = true;
/* machine-done notifier: if the board created a "/machine/smram" region,
 * alias the full 4GiB of it into this CPU's address-space root so SMM code
 * can access SMRAM. */
4378 static void x86_cpu_machine_done(Notifier *n, void *unused)
4380 X86CPU *cpu = container_of(n, X86CPU, machine_done);
4381 MemoryRegion *smram =
4382 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4385 cpu->smram = g_new(MemoryRegion, 1);
4386 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4387 smram, 0, 1ull << 32);
4388 memory_region_set_enabled(cpu->smram, true);
4389 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
/* CONFIG_USER_ONLY variant: user-mode emulation has no APIC device, so this
 * presumably is an empty stub — body elided in this listing; confirm. */
4393 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4398 /* Note: Only safe for use on x86(-64) hosts */
/* Queries the host's physical address width via CPUID.80000008h EAX[7:0];
 * falls back to 36 bits when the leaf is unavailable. */
4399 static uint32_t x86_host_phys_bits(void)
4402 uint32_t host_phys_bits;
4404 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4405 if (eax >= 0x80000008) {
4406 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4407 /* Note: According to AMD doc 25481 rev 2.34 they have a field
4408 * at 23:16 that can specify a maximum physical address bits for
4409 * the guest that can override this value; but I've not seen
4410 * anything with that set.
4412 host_phys_bits = eax & 0xff;
4414 /* It's an odd 64 bit machine that doesn't have the leaf for
4415 * physical address bits; fall back to 36 that's most older
4418 host_phys_bits = 36;
4421 return host_phys_bits;
/* Presumably raises *min to at least @value (callers pass cpuid_min_level /
 * cpuid_min_xlevel / cpuid_min_xlevel2) — body elided in this listing;
 * TODO confirm against the full source. */
4424 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4431 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
/* If feature word @w has any bit set, bump the minimum CPUID level of its
 * range (basic / 0x8000xxxx extended / 0xC000xxxx Centaur) to include the
 * leaf that reports it. */
4432 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4434 CPUX86State *env = &cpu->env;
4435 FeatureWordInfo *fi = &feature_word_info[w];
4436 uint32_t eax = fi->cpuid_eax;
4437 uint32_t region = eax & 0xF0000000;
4439 if (!env->features[w]) {
4445 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4448 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4451 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4456 /* Calculate XSAVE components based on the configured CPU feature flags */
/* Builds the 64-bit XSAVE component mask from x86_ext_save_areas: a
 * component is enabled when the feature that owns it is enabled. The mask
 * is split into the FEAT_XSAVE_COMP_{LO,HI} feature words. */
4457 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4459 CPUX86State *env = &cpu->env;
/* Without XSAVE there are no components to advertise. */
4463 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4468 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4469 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4470 if (env->features[esa->feature] & esa->bits) {
4471 mask |= (1ULL << i);
4475 env->features[FEAT_XSAVE_COMP_LO] = mask;
4476 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4479 /***** Steps involved in loading and filtering CPUID data
4481 * When initializing and realizing a CPU object, the steps
4482 * involved in setting up CPUID data are:
4484 * 1) Loading CPU model definition (X86CPUDefinition). This is
4485 * implemented by x86_cpu_load_def() and should be completely
4486 * transparent, as it is done automatically by instance_init.
4487 * No code should need to look at X86CPUDefinition structs
4488 * outside instance_init.
4490 * 2) CPU expansion. This is done by realize before CPUID
4491 * filtering, and will make sure host/accelerator data is
4492 * loaded for CPU models that depend on host capabilities
4493 * (e.g. "host"). Done by x86_cpu_expand_features().
4495 * 3) CPUID filtering. This initializes extra data related to
4496 * CPUID, and checks if the host supports all capabilities
4497 * required by the CPU. Runnability of a CPU model is
4498 * determined at this step. Done by x86_cpu_filter_features().
4500 * Some operations don't require all steps to be performed.
4503 * - CPU instance creation (instance_init) will run only CPU
4504 * model loading. CPU expansion can't run at instance_init-time
4505 * because host/accelerator data may be not available yet.
4506 * - CPU realization will perform both CPU model expansion and CPUID
4507 * filtering, and return an error in case one of them fails.
4508 * - query-cpu-definitions needs to run all 3 steps. It needs
4509 * to run CPUID filtering, as the 'unavailable-features'
4510 * field is set based on the filtering results.
4511 * - The query-cpu-model-expansion QMP command only needs to run
4512 * CPU model loading and CPU expansion. It should not filter
4513 * any CPUID data based on host capabilities.
4516 /* Expand CPU configuration data, based on configured features
4517 * and host/accelerator capabilities when appropriate.
/* Step 2 of CPUID setup (see the comment block above in the file): resolves
 * "max" features, applies global +/- feature lists, computes XSAVE
 * components, and auto-raises cpuid level/xlevel/xlevel2. */
4519 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4521 CPUX86State *env = &cpu->env;
4524 Error *local_err = NULL;
4526 /*TODO: Now cpu->max_features doesn't overwrite features
4527 * set using QOM properties, and we can convert
4528 * plus_features & minus_features to global properties
4529 * inside x86_cpu_parse_featurestr() too.
4531 if (cpu->max_features) {
4532 for (w = 0; w < FEATURE_WORDS; w++) {
4533 /* Override only features that weren't set explicitly
/* Bits the user set (user_features) and no-autoenable bits are preserved. */
4537 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4538 ~env->user_features[w] & \
4539 ~feature_word_info[w].no_autoenable_flags;
/* Legacy global "+feature"/"-feature" lists from -cpu parsing. */
4543 for (l = plus_features; l; l = l->next) {
4544 const char *prop = l->data;
4545 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4551 for (l = minus_features; l; l = l->next) {
4552 const char *prop = l->data;
4553 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
/* KVM paravirt features only make sense under KVM with expose-kvm on. */
4559 if (!kvm_enabled() || !cpu->expose_kvm) {
4560 env->features[FEAT_KVM] = 0;
4563 x86_cpu_enable_xsave_components(cpu);
4565 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
4566 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4567 if (cpu->full_cpuid_auto_level) {
4568 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4569 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4570 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4571 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4572 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4573 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4574 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4575 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4576 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4577 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4578 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4579 /* SVM requires CPUID[0x8000000A] */
4580 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4581 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4584 /* SEV requires CPUID[0x8000001F] */
4585 if (sev_enabled()) {
4586 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4590 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4591 if (env->cpuid_level == UINT32_MAX) {
4592 env->cpuid_level = env->cpuid_min_level;
4594 if (env->cpuid_xlevel == UINT32_MAX) {
4595 env->cpuid_xlevel = env->cpuid_min_xlevel;
4597 if (env->cpuid_xlevel2 == UINT32_MAX) {
4598 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4602 if (local_err != NULL) {
4603 error_propagate(errp, local_err);
4608 * Finishes initialization of CPUID data, filters CPU feature
4609 * words based on host availability of each feature.
4611 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4613 static int x86_cpu_filter_features(X86CPU *cpu)
4615 CPUX86State *env = &cpu->env;
/* Clamp each feature word to what the accelerator actually supports.
 * Bits the user requested but the host lacks are recorded in
 * cpu->filtered_features[] so they can be reported to the user later. */
4619 for (w = 0; w < FEATURE_WORDS; w++) {
4620 uint32_t host_feat =
4621 x86_cpu_get_supported_feature_word(w, false);
4622 uint32_t requested_features = env->features[w];
4623 env->features[w] &= host_feat;
4624 cpu->filtered_features[w] = requested_features & ~env->features[w];
4625 if (cpu->filtered_features[w]) {
/* Intel PT is all-or-nothing: the CPUID[0x14] capability bits we
 * advertise in cpu_x86_cpuid() are fixed, so every one of them must be
 * supported by KVM, otherwise intel-pt is stripped (and reported as
 * filtered). Query the host's leaf 0x14 subleaves 0 and 1. */
4630 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
4632 KVMState *s = CPU(cpu)->kvm_state;
4633 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
4634 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
4635 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
4636 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
4637 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
/* Fail if any required capability bit is clear, if the host supports
 * fewer address ranges than we advertise, or if the host reports trace
 * IP payloads in LIP format (only RIP format is emulated). */
4640 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
4641 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
4642 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
4643 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
4644 INTEL_PT_ADDR_RANGES_NUM) ||
4645 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
4646 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
4647 (ecx_0 & INTEL_PT_IP_LIP)) {
4649 * Processor Trace capabilities aren't configurable, so if the
4650 * host can't emulate the capabilities we report on
4651 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
4653 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
4654 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
/* Vendor predicates: true iff the configured CPUID[0] vendor registers
 * (EBX/EDX/ECX as vendor1/2/3) spell "GenuineIntel" / "AuthenticAMD". */
4662 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4663 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4664 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4665 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4666 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4667 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* Device realize hook for X86CPU: expands and filters CPUID features,
 * fixes up AMD alias bits and phys-bits, builds cache info, creates the
 * APIC and (for TCG softmmu) the SMM address space, then chains to the
 * parent realize. Errors are reported through @errp. */
4668 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4670 CPUState *cs = CPU(dev);
4671 X86CPU *cpu = X86_CPU(dev);
4672 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4673 CPUX86State *env = &cpu->env;
4674 Error *local_err = NULL;
4675 static bool ht_warned;
/* Models like "host" need an accelerator that can read host CPUID. */
4677 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4678 char *name = x86_cpu_class_get_model_name(xcc);
4679 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4684 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4685 error_setg(errp, "apic-id property was not initialized properly");
/* Resolve +/-feature requests and auto CPUID levels before filtering. */
4689 x86_cpu_expand_features(cpu, &local_err);
/* Filtering returns non-zero when the host lacks requested features;
 * "check" only warns, "enforce" turns it into a hard error. */
4694 if (x86_cpu_filter_features(cpu) &&
4695 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4696 x86_cpu_report_filtered_features(cpu);
4697 if (cpu->enforce_cpuid) {
4698 error_setg(&local_err,
4699 accel_uses_host_cpuid() ?
4700 "Host doesn't support requested features" :
4701 "TCG doesn't support requested features");
4706 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4709 if (IS_AMD_CPU(env)) {
4710 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4711 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4712 & CPUID_EXT2_AMD_ALIASES);
4715 /* For 64bit systems think about the number of physical bits to present.
4716 * ideally this should be the same as the host; anything other than matching
4717 * the host can cause incorrect guest behaviour.
4718 * QEMU used to pick the magic value of 40 bits that corresponds to
4719 * consumer AMD devices but nothing else.
4721 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4722 if (accel_uses_host_cpuid()) {
4723 uint32_t host_phys_bits = x86_host_phys_bits();
4726 if (cpu->host_phys_bits) {
4727 /* The user asked for us to use the host physical bits */
4728 cpu->phys_bits = host_phys_bits;
4731 /* Print a warning if the user set it to a value that's not the
4734 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4736 warn_report("Host physical bits (%u)"
4737 " does not match phys-bits property (%u)",
4738 host_phys_bits, cpu->phys_bits);
4742 if (cpu->phys_bits &&
4743 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4744 cpu->phys_bits < 32)) {
4745 error_setg(errp, "phys-bits should be between 32 and %u "
4747 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
/* TCG path: only the single compiled-in physical address width works. */
4751 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4752 error_setg(errp, "TCG only supports phys-bits=%u",
4753 TCG_PHYS_ADDR_BITS);
4757 /* 0 means it was not explicitly set by the user (or by machine
4758 * compat_props or by the host code above). In this case, the default
4759 * is the value used by TCG (40).
4761 if (cpu->phys_bits == 0) {
4762 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4765 /* For 32 bit systems don't use the user set value, but keep
4766 * phys_bits consistent with what we tell the guest.
4768 if (cpu->phys_bits != 0) {
4769 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
/* Without long mode: 36 bits when PSE36 is advertised, else 32. */
4773 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4774 cpu->phys_bits = 36;
4776 cpu->phys_bits = 32;
4780 /* Cache information initialization */
4781 if (!cpu->legacy_cache) {
4782 if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
4783 char *name = x86_cpu_class_get_model_name(xcc);
4785 "CPU model '%s' doesn't support legacy-cache=off", name);
4789 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
4790 *xcc->cpu_def->cache_info;
4792 /* Build legacy cache information */
4793 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
4794 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
4795 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
4796 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
4798 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
4799 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
4800 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
4801 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
4803 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
4804 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
4805 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
4806 env->cache_info_amd.l3_cache = &legacy_l3_cache;
/* Hook the CPU into the common exec core (registers it with the VM). */
4810 cpu_exec_realizefn(cs, &local_err);
4811 if (local_err != NULL) {
4812 error_propagate(errp, local_err);
4816 #ifndef CONFIG_USER_ONLY
4817 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
/* A local APIC is created when advertised via CPUID or whenever SMP
 * needs inter-processor interrupts. */
4819 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4820 x86_cpu_apic_create(cpu, &local_err);
4821 if (local_err != NULL) {
4829 #ifndef CONFIG_USER_ONLY
/* TCG softmmu: build a per-CPU address space so SMRAM can overlay
 * normal system memory while in SMM. */
4830 if (tcg_enabled()) {
4831 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4832 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4834 /* Outer container... */
4835 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4836 memory_region_set_enabled(cpu->cpu_as_root, true);
4838 /* ... with two regions inside: normal system memory with low
4841 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4842 get_system_memory(), 0, ~0ull);
4843 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4844 memory_region_set_enabled(cpu->cpu_as_mem, true);
4847 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4848 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4850 /* ... SMRAM with higher priority, linked from /machine/smram. */
4851 cpu->machine_done.notify = x86_cpu_machine_done;
4852 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4858 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
4859 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
4860 * based on inputs (sockets,cores,threads), it is still better to give
4863 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
4864 * cs->nr_threads hasn't been populated yet and the checking is incorrect.
4866 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
4867 error_report("AMD CPU doesn't support hyperthreading. Please configure"
4868 " -smp options properly.");
4872 x86_cpu_apic_realize(cpu, &local_err);
4873 if (local_err != NULL) {
/* Finally chain to the parent class realize. */
4878 xcc->parent_realize(dev, &local_err);
4881 if (local_err != NULL) {
4882 error_propagate(errp, local_err);
/* Device unrealize hook: tears down in reverse order of realize —
 * stop/remove the vCPU and drop the reset hook (system emulation only),
 * destroy the APIC device, then chain to the parent unrealize. */
4887 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
4889 X86CPU *cpu = X86_CPU(dev);
4890 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4891 Error *local_err = NULL;
4893 #ifndef CONFIG_USER_ONLY
4894 cpu_remove_sync(CPU(dev));
4895 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
/* object_unparent() finalizes the APIC; clear the pointer so nothing
 * dereferences it afterwards. */
4898 if (cpu->apic_state) {
4899 object_unparent(OBJECT(cpu->apic_state));
4900 cpu->apic_state = NULL;
4903 xcc->parent_unrealize(dev, &local_err);
4904 if (local_err != NULL) {
4905 error_propagate(errp, local_err);
/* Per-property state for feature-bit QOM properties. Fields are elided
 * in this listing; the uses below show a FeatureWord index 'w' and a
 * uint32_t 'mask' of the bit(s) the property controls. */
4910 typedef struct BitProperty {
/* QOM getter for a feature-bit property: reports true only if ALL bits
 * in fp->mask are set in the selected feature word (a mask may cover
 * several bits when the same name was registered more than once). */
4915 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
4916 void *opaque, Error **errp)
4918 X86CPU *cpu = X86_CPU(obj);
4919 BitProperty *fp = opaque;
4920 uint32_t f = cpu->env.features[fp->w];
4921 bool value = (f & fp->mask) == fp->mask;
4922 visit_type_bool(v, name, &value, errp);
/* QOM setter for a feature-bit property: sets or clears fp->mask in the
 * feature word and marks the bits in user_features[] so that later
 * automatic feature expansion does not override an explicit choice. */
4925 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
4926 void *opaque, Error **errp)
4928 DeviceState *dev = DEVICE(obj);
4929 X86CPU *cpu = X86_CPU(obj);
4930 BitProperty *fp = opaque;
4931 Error *local_err = NULL;
/* Feature bits are immutable once the device is realized. */
4934 if (dev->realized) {
4935 qdev_prop_set_after_realize(dev, name, errp);
4939 visit_type_bool(v, name, &value, &local_err);
4941 error_propagate(errp, local_err);
4946 cpu->env.features[fp->w] |= fp->mask;
4948 cpu->env.features[fp->w] &= ~fp->mask;
/* Record that the user set this bit explicitly (set OR cleared). */
4950 cpu->env.user_features[fp->w] |= fp->mask;
/* Property release hook: reclaims the BitProperty allocated in
 * x86_cpu_register_bit_prop() (the free call is elided in this listing). */
4953 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
4956 BitProperty *prop = opaque;
4960 /* Register a boolean property to get/set a single bit in a uint32_t field.
4962 * The same property name can be registered multiple times to make it affect
4963 * multiple bits in the same FeatureWord. In that case, the getter will return
4964 * true only if all bits are set.
4966 static void x86_cpu_register_bit_prop(X86CPU *cpu,
4967 const char *prop_name,
4973 uint32_t mask = (1UL << bitnr);
/* If the property already exists, the existing BitProperty is extended
 * with this bit (branch elided in this listing); otherwise allocate a
 * fresh zero-initialized BitProperty and register the accessors. */
4975 op = object_property_find(OBJECT(cpu), prop_name, NULL);
4981 fp = g_new0(BitProperty, 1);
4984 object_property_add(OBJECT(cpu), prop_name, "bool",
4985 x86_cpu_get_bit_prop,
4986 x86_cpu_set_bit_prop,
4987 x86_cpu_release_bit_prop, fp, &error_abort);
/* Register the QOM property for one named bit of feature word @w.
 * Bits without a name in feature_word_info[] get no property (the
 * early-return for name == NULL is elided in this listing). */
4991 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
4995 FeatureWordInfo *fi = &feature_word_info[w];
4996 const char *name = fi->feat_names[bitnr];
5002 /* Property names should use "-" instead of "_".
5003 * Old names containing underscores are registered as aliases
5004 * using object_property_add_alias()
5006 assert(!strchr(name, '_'));
5007 /* aliases don't use "|" delimiters anymore, they are registered
5008 * manually using object_property_add_alias() */
5009 assert(!strchr(name, '|'));
5010 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
/* Build a GuestPanicInformation record from the Hyper-V crash MSRs.
 * Returns NULL when the guest was not given the Hyper-V crash MSR
 * capability; otherwise the caller owns the allocated structure
 * (freed with qapi_free_GuestPanicInformation()). */
5013 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5015 X86CPU *cpu = X86_CPU(cs);
5016 CPUX86State *env = &cpu->env;
5017 GuestPanicInformation *panic_info = NULL;
5019 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5020 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5022 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
/* The QAPI struct carries exactly five Hyper-V crash parameters. */
5024 assert(HV_CRASH_PARAMS >= 5);
5025 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5026 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5027 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5028 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5029 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5034 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5035 const char *name, void *opaque,
5038 CPUState *cs = CPU(obj);
5039 GuestPanicInformation *panic_info;
5041 if (!cs->crash_occurred) {
5042 error_setg(errp, "No crash occured");
5046 panic_info = x86_cpu_get_crash_info(cs);
5047 if (panic_info == NULL) {
5048 error_setg(errp, "No crash information");
5052 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5054 qapi_free_GuestPanicInformation(panic_info);
/* Instance init for X86CPU: registers the QOM properties (CPUID version
 * fields, vendor/model-id, tsc-frequency, feature-word introspection,
 * crash info), one boolean property per named feature bit, plus the
 * legacy/underscore spelling aliases, then loads the model defaults. */
5057 static void x86_cpu_initfn(Object *obj)
5059 CPUState *cs = CPU(obj);
5060 X86CPU *cpu = X86_CPU(obj);
5061 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
5062 CPUX86State *env = &cpu->env;
5067 object_property_add(obj, "family", "int",
5068 x86_cpuid_version_get_family,
5069 x86_cpuid_version_set_family, NULL, NULL, NULL);
5070 object_property_add(obj, "model", "int",
5071 x86_cpuid_version_get_model,
5072 x86_cpuid_version_set_model, NULL, NULL, NULL);
5073 object_property_add(obj, "stepping", "int",
5074 x86_cpuid_version_get_stepping,
5075 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
5076 object_property_add_str(obj, "vendor",
5077 x86_cpuid_get_vendor,
5078 x86_cpuid_set_vendor, NULL);
5079 object_property_add_str(obj, "model-id",
5080 x86_cpuid_get_model_id,
5081 x86_cpuid_set_model_id, NULL);
5082 object_property_add(obj, "tsc-frequency", "int",
5083 x86_cpuid_get_tsc_freq,
5084 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
/* Read-only introspection of enabled and host-filtered feature words. */
5085 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
5086 x86_cpu_get_feature_words,
5087 NULL, NULL, (void *)env->features, NULL);
5088 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
5089 x86_cpu_get_feature_words,
5090 NULL, NULL, (void *)cpu->filtered_features, NULL);
5092 object_property_add(obj, "crash-information", "GuestPanicInformation",
5093 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
5095 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
/* One boolean property per named bit of every feature word. */
5097 for (w = 0; w < FEATURE_WORDS; w++) {
5100 for (bitnr = 0; bitnr < 32; bitnr++) {
5101 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
/* Compatibility aliases: alternate and legacy-underscore spellings of
 * feature names map onto the canonical dash-separated properties. */
5105 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
5106 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
5107 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
5108 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
5109 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
5110 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
5111 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
5113 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
5114 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
5115 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
5116 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
5117 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
5118 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
5119 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
5120 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
5121 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
5122 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
5123 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
5124 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
5125 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
5126 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
5127 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
5128 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
5129 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
5130 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
5131 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
5132 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
5133 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
/* Load the class's model defaults last, so the properties exist. */
5136 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
5140 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5142 X86CPU *cpu = X86_CPU(cs);
5144 return cpu->apic_id;
5147 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5149 X86CPU *cpu = X86_CPU(cs);
5151 return cpu->env.cr[0] & CR0_PG_MASK;
5154 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5156 X86CPU *cpu = X86_CPU(cs);
5158 cpu->env.eip = value;
5161 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5163 X86CPU *cpu = X86_CPU(cs);
5165 cpu->env.eip = tb->pc - tb->cs_base;
5168 static bool x86_cpu_has_work(CPUState *cs)
5170 X86CPU *cpu = X86_CPU(cs);
5171 CPUX86State *env = &cpu->env;
5173 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
5174 CPU_INTERRUPT_POLL)) &&
5175 (env->eflags & IF_MASK)) ||
5176 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
5177 CPU_INTERRUPT_INIT |
5178 CPU_INTERRUPT_SIPI |
5179 CPU_INTERRUPT_MCE)) ||
5180 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
5181 !(env->hflags & HF_SMM_MASK));
/* Configure the disassembler for the CPU's current code-segment width:
 * 64-bit when CS.L is set, else 32- or 16-bit from CS.D. */
5184 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5186 X86CPU *cpu = X86_CPU(cs);
5187 CPUX86State *env = &cpu->env;
5189 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5190 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5191 : bfd_mach_i386_i8086);
5192 info->print_insn = print_insn_i386;
/* Capstone configuration mirrors the bfd selection above (the 16-bit
 * arm of this ternary is elided in this listing). */
5194 info->cap_arch = CS_ARCH_X86;
5195 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5196 : env->hflags & HF_CS32_MASK ? CS_MODE_32
/* x86 instructions are byte-granular, up to 15 bytes; 8 is the split
 * hint used for fetching. */
5198 info->cap_insn_unit = 1;
5199 info->cap_insn_split = 8;
/* Recompute the derived env->hflags bits (CPL, PE/MP/EM/TS, TF/VM/IOPL,
 * OSFXSR, LMA, CS/SS width, ADDSEG) from the architectural state,
 * preserving every flag outside HFLAG_COPY_MASK. */
5202 void x86_update_hflags(CPUX86State *env)
5205 #define HFLAG_COPY_MASK \
5206 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
5207 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
5208 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
5209 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
/* Start from the bits we keep, then rebuild the rest. */
5211 hflags = env->hflags & HFLAG_COPY_MASK;
/* CPL is the DPL field of the current stack segment descriptor. */
5212 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
5213 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
5214 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
5215 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
5216 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
5218 if (env->cr[4] & CR4_OSFXSR_MASK) {
5219 hflags |= HF_OSFXSR_MASK;
5222 if (env->efer & MSR_EFER_LMA) {
5223 hflags |= HF_LMA_MASK;
/* In 64-bit mode (LMA + CS.L) code and stack are implicitly 32/64-bit;
 * otherwise derive CS32/SS32 from the segment descriptors' B bits. */
5226 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
5227 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
5229 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
5230 (DESC_B_SHIFT - HF_CS32_SHIFT);
5231 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
5232 (DESC_B_SHIFT - HF_SS32_SHIFT);
/* ADDSEG: segment bases must be added in real/VM86/16-bit modes, or in
 * protected mode whenever DS/ES/SS have non-zero bases. */
5233 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
5234 !(hflags & HF_CS32_MASK)) {
5235 hflags |= HF_ADDSEG_MASK;
5237 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
5238 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
5241 env->hflags = hflags;
/* qdev properties of every X86CPU instance. Topology IDs default to 0
 * for *-user builds and to "unassigned" for system emulation (the
 * #else/#endif of the CONFIG_USER_ONLY branch are elided in this
 * listing). */
5244 static Property x86_cpu_properties[] = {
5245 #ifdef CONFIG_USER_ONLY
5246 /* apic_id = 0 by default for *-user, see commit 9886e834 */
5247 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
5248 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
5249 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
5250 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
5252 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
5253 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
5254 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
5255 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
5257 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
5258 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
/* Hyper-V enlightenments, all opt-in. */
5259 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
5260 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
5261 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
5262 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
5263 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
5264 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
5265 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
5266 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
5267 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
5268 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
5269 DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
5270 DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
/* "check" warns about filtered features; "enforce" makes them fatal. */
5271 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
5272 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
5273 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
5274 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
5275 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
5276 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
/* UINT32_MAX means "derive from min-*level at realize time". */
5277 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
5278 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
5279 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
5280 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
5281 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
5282 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
5283 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
5284 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
5285 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
5286 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
5287 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
5288 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
5290 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
5291 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
5293 * legacy_cache defaults to true unless the CPU model provides its
5294 * own cache information (see x86_cpu_load_def()).
5296 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
5299 * From "Requirements for Implementing the Microsoft
5300 * Hypervisor Interface":
5301 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
5303 * "Starting with Windows Server 2012 and Windows 8, if
5304 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
5305 * the hypervisor imposes no specific limit to the number of VPs.
5306 * In this case, Windows Server 2012 guest VMs may use more than
5307 * 64 VPs, up to the maximum supported number of processors applicable
5308 * to the specific Windows version being used."
5310 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
5311 DEFINE_PROP_END_OF_LIST()
/* Class init for the abstract TYPE_X86_CPU base type: wires the qdev
 * realize/unrealize hooks, the CPUClass method table and the property
 * list shared by all x86 CPU models. */
5314 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
5316 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5317 CPUClass *cc = CPU_CLASS(oc);
5318 DeviceClass *dc = DEVICE_CLASS(oc);
/* Save the parent's realize/unrealize so ours can chain to them. */
5320 device_class_set_parent_realize(dc, x86_cpu_realizefn,
5321 &xcc->parent_realize);
5322 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
5323 &xcc->parent_unrealize);
5324 dc->props = x86_cpu_properties;
5326 xcc->parent_reset = cc->reset;
5327 cc->reset = x86_cpu_reset;
5328 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
5330 cc->class_by_name = x86_cpu_class_by_name;
5331 cc->parse_features = x86_cpu_parse_featurestr;
5332 cc->has_work = x86_cpu_has_work;
5334 cc->do_interrupt = x86_cpu_do_interrupt;
5335 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
5337 cc->dump_state = x86_cpu_dump_state;
5338 cc->get_crash_info = x86_cpu_get_crash_info;
5339 cc->set_pc = x86_cpu_set_pc;
5340 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
5341 cc->gdb_read_register = x86_cpu_gdb_read_register;
5342 cc->gdb_write_register = x86_cpu_gdb_write_register;
5343 cc->get_arch_id = x86_cpu_get_arch_id;
5344 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
/* user-mode gets MMU fault handling; softmmu gets the full set of
 * memory/ELF-note hooks and migration state (the #else is elided). */
5345 #ifdef CONFIG_USER_ONLY
5346 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
5348 cc->asidx_from_attrs = x86_asidx_from_attrs;
5349 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
5350 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
5351 cc->write_elf64_note = x86_cpu_write_elf64_note;
5352 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
5353 cc->write_elf32_note = x86_cpu_write_elf32_note;
5354 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
5355 cc->vmsd = &vmstate_x86_cpu;
5357 cc->gdb_arch_name = x86_gdb_arch_name;
5358 #ifdef TARGET_X86_64
5359 cc->gdb_core_xml_file = "i386-64bit.xml";
5360 cc->gdb_num_core_regs = 57;
5362 cc->gdb_core_xml_file = "i386-32bit.xml";
5363 cc->gdb_num_core_regs = 41;
5365 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
5366 cc->debug_excp_handler = breakpoint_handler;
5368 cc->cpu_exec_enter = x86_cpu_exec_enter;
5369 cc->cpu_exec_exit = x86_cpu_exec_exit;
5371 cc->tcg_initialize = tcg_x86_init;
5373 cc->disas_set_info = x86_disas_set_info;
5375 dc->user_creatable = true;
/* QOM registration of the abstract x86 CPU base type; concrete models
 * are registered as subtypes by x86_cpu_register_types(). */
5378 static const TypeInfo x86_cpu_type_info = {
5379 .name = TYPE_X86_CPU,
5381 .instance_size = sizeof(X86CPU),
5382 .instance_init = x86_cpu_initfn,
5384 .class_size = sizeof(X86CPUClass),
5385 .class_init = x86_cpu_common_class_init,
5389 /* "base" CPU model, used by query-cpu-model-expansion */
/* Class init for the "base" model: marks it static (never changes
 * across QEMU versions) and migration-safe, with no features enabled. */
5390 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
5392 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5394 xcc->static_model = true;
5395 xcc->migration_safe = true;
5396 xcc->model_description = "base CPU model type with no features enabled";
/* Type registration for the "base" CPU model described above. */
5400 static const TypeInfo x86_base_cpu_type_info = {
5401 .name = X86_CPU_TYPE_NAME("base"),
5402 .parent = TYPE_X86_CPU,
5403 .class_init = x86_cpu_base_class_init,
/* Register the abstract base type, one subtype per built-in CPU model
 * definition, the "max"/"base" models, and — when an accelerator that
 * reads host CPUID is compiled in — the "host" model. */
5406 static void x86_cpu_register_types(void)
5410 type_register_static(&x86_cpu_type_info);
5411 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5412 x86_register_cpudef_type(&builtin_x86_defs[i]);
5414 type_register_static(&max_x86_cpu_type_info);
5415 type_register_static(&x86_base_cpu_type_info);
5416 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5417 type_register_static(&host_x86_cpu_type_info);
/* Run the registrations at QEMU type-initialization time. */
5421 type_init(x86_cpu_register_types)