2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
26 #include "exec/exec-all.h"
27 #include "sysemu/kvm.h"
28 #include "sysemu/hvf.h"
29 #include "sysemu/cpus.h"
33 #include "qemu/error-report.h"
34 #include "qemu/option.h"
35 #include "qemu/config-file.h"
36 #include "qapi/error.h"
37 #include "qapi/qapi-visit-misc.h"
38 #include "qapi/qapi-visit-run-state.h"
39 #include "qapi/qmp/qdict.h"
40 #include "qapi/qmp/qerror.h"
41 #include "qapi/visitor.h"
42 #include "qom/qom-qobject.h"
43 #include "sysemu/arch_init.h"
45 #include "standard-headers/asm-x86/kvm_para.h"
47 #include "sysemu/sysemu.h"
48 #include "hw/qdev-properties.h"
49 #include "hw/i386/topology.h"
50 #ifndef CONFIG_USER_ONLY
51 #include "exec/address-spaces.h"
53 #include "hw/xen/xen.h"
54 #include "hw/i386/apic_internal.h"
57 #include "disas/capstone.h"
59 /* Helpers for building CPUID[2] descriptors: */
61 struct CPUID2CacheDescriptorInfo {
70 * Known CPUID 2 cache descriptors.
71 * From Intel SDM Volume 2A, CPUID instruction
73 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
74 [0x06] = { .level = 1, .type = ICACHE, .size = 8 * KiB,
75 .associativity = 4, .line_size = 32, },
76 [0x08] = { .level = 1, .type = ICACHE, .size = 16 * KiB,
77 .associativity = 4, .line_size = 32, },
78 [0x09] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
79 .associativity = 4, .line_size = 64, },
80 [0x0A] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
81 .associativity = 2, .line_size = 32, },
82 [0x0C] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
83 .associativity = 4, .line_size = 32, },
84 [0x0D] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
85 .associativity = 4, .line_size = 64, },
86 [0x0E] = { .level = 1, .type = DCACHE, .size = 24 * KiB,
87 .associativity = 6, .line_size = 64, },
88 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
89 .associativity = 2, .line_size = 64, },
90 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
91 .associativity = 8, .line_size = 64, },
92 /* lines per sector is not supported by cpuid2_cache_descriptor(),
93 * so descriptors 0x22, 0x23 are not included
95 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
96 .associativity = 16, .line_size = 64, },
97 /* lines per sector is not supported by cpuid2_cache_descriptor(),
98 * so descriptors 0x25, 0x20 are not included
100 [0x2C] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
101 .associativity = 8, .line_size = 64, },
102 [0x30] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
103 .associativity = 8, .line_size = 64, },
104 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
105 .associativity = 4, .line_size = 32, },
106 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
107 .associativity = 4, .line_size = 32, },
108 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
109 .associativity = 4, .line_size = 32, },
110 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
111 .associativity = 4, .line_size = 32, },
112 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
113 .associativity = 4, .line_size = 32, },
114 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
115 .associativity = 4, .line_size = 64, },
116 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
117 .associativity = 8, .line_size = 64, },
118 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
119 .associativity = 12, .line_size = 64, },
120 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
121 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
122 .associativity = 12, .line_size = 64, },
123 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
124 .associativity = 16, .line_size = 64, },
125 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
126 .associativity = 12, .line_size = 64, },
127 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
128 .associativity = 16, .line_size = 64, },
129 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
130 .associativity = 24, .line_size = 64, },
131 [0x60] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
132 .associativity = 8, .line_size = 64, },
133 [0x66] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
134 .associativity = 4, .line_size = 64, },
135 [0x67] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
136 .associativity = 4, .line_size = 64, },
137 [0x68] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
138 .associativity = 4, .line_size = 64, },
139 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
140 .associativity = 4, .line_size = 64, },
141 /* lines per sector is not supported by cpuid2_cache_descriptor(),
142 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
144 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
145 .associativity = 8, .line_size = 64, },
146 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
147 .associativity = 2, .line_size = 64, },
148 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
149 .associativity = 8, .line_size = 64, },
150 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
151 .associativity = 8, .line_size = 32, },
152 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
153 .associativity = 8, .line_size = 32, },
154 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
155 .associativity = 8, .line_size = 32, },
156 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
157 .associativity = 8, .line_size = 32, },
158 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
159 .associativity = 4, .line_size = 64, },
160 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
161 .associativity = 8, .line_size = 64, },
162 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
163 .associativity = 4, .line_size = 64, },
164 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
165 .associativity = 4, .line_size = 64, },
166 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
167 .associativity = 4, .line_size = 64, },
168 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
169 .associativity = 8, .line_size = 64, },
170 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
171 .associativity = 8, .line_size = 64, },
172 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
173 .associativity = 8, .line_size = 64, },
174 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
175 .associativity = 12, .line_size = 64, },
176 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
177 .associativity = 12, .line_size = 64, },
178 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
179 .associativity = 12, .line_size = 64, },
180 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
181 .associativity = 16, .line_size = 64, },
182 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
183 .associativity = 16, .line_size = 64, },
184 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
185 .associativity = 16, .line_size = 64, },
186 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
187 .associativity = 24, .line_size = 64, },
188 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
189 .associativity = 24, .line_size = 64, },
190 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
191 .associativity = 24, .line_size = 64, },
195 * "CPUID leaf 2 does not report cache descriptor information,
196 * use CPUID leaf 4 to query cache parameters"
198 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
201 * Return a CPUID 2 cache descriptor for a given cache.
202 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
204 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
208 assert(cache->size > 0);
209 assert(cache->level > 0);
210 assert(cache->line_size > 0);
211 assert(cache->associativity > 0);
212 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
213 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
214 if (d->level == cache->level && d->type == cache->type &&
215 d->size == cache->size && d->line_size == cache->line_size &&
216 d->associativity == cache->associativity) {
221 return CACHE_DESCRIPTOR_UNAVAILABLE;
224 /* CPUID Leaf 4 constants: */
227 #define CACHE_TYPE_D 1
228 #define CACHE_TYPE_I 2
229 #define CACHE_TYPE_UNIFIED 3
231 #define CACHE_LEVEL(l) (l << 5)
233 #define CACHE_SELF_INIT_LEVEL (1 << 8)
236 #define CACHE_NO_INVD_SHARING (1 << 0)
237 #define CACHE_INCLUSIVE (1 << 1)
238 #define CACHE_COMPLEX_IDX (1 << 2)
240 /* Encode CacheType for CPUID[4].EAX */
241 #define CACHE_TYPE(t) (((t) == DCACHE) ? CACHE_TYPE_D : \
242 ((t) == ICACHE) ? CACHE_TYPE_I : \
243 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
244 0 /* Invalid value */)
247 /* Encode cache info for CPUID[4] */
248 static void encode_cache_cpuid4(CPUCacheInfo *cache,
249 int num_apic_ids, int num_cores,
250 uint32_t *eax, uint32_t *ebx,
251 uint32_t *ecx, uint32_t *edx)
253 assert(cache->size == cache->line_size * cache->associativity *
254 cache->partitions * cache->sets);
256 assert(num_apic_ids > 0);
257 *eax = CACHE_TYPE(cache->type) |
258 CACHE_LEVEL(cache->level) |
259 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
260 ((num_cores - 1) << 26) |
261 ((num_apic_ids - 1) << 14);
263 assert(cache->line_size > 0);
264 assert(cache->partitions > 0);
265 assert(cache->associativity > 0);
266 /* We don't implement fully-associative caches */
267 assert(cache->associativity < cache->sets);
268 *ebx = (cache->line_size - 1) |
269 ((cache->partitions - 1) << 12) |
270 ((cache->associativity - 1) << 22);
272 assert(cache->sets > 0);
273 *ecx = cache->sets - 1;
275 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
276 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
277 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
280 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
281 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
283 assert(cache->size % 1024 == 0);
284 assert(cache->lines_per_tag > 0);
285 assert(cache->associativity > 0);
286 assert(cache->line_size > 0);
287 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
288 (cache->lines_per_tag << 8) | (cache->line_size);
291 #define ASSOC_FULL 0xFF
293 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
294 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
304 a == ASSOC_FULL ? 0xF : \
305 0 /* invalid value */)
308 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
311 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
313 uint32_t *ecx, uint32_t *edx)
315 assert(l2->size % 1024 == 0);
316 assert(l2->associativity > 0);
317 assert(l2->lines_per_tag > 0);
318 assert(l2->line_size > 0);
319 *ecx = ((l2->size / 1024) << 16) |
320 (AMD_ENC_ASSOC(l2->associativity) << 12) |
321 (l2->lines_per_tag << 8) | (l2->line_size);
324 assert(l3->size % (512 * 1024) == 0);
325 assert(l3->associativity > 0);
326 assert(l3->lines_per_tag > 0);
327 assert(l3->line_size > 0);
328 *edx = ((l3->size / (512 * 1024)) << 18) |
329 (AMD_ENC_ASSOC(l3->associativity) << 12) |
330 (l3->lines_per_tag << 8) | (l3->line_size);
337 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
338 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
339 * Define the constants to build the cpu topology. Right now, TOPOEXT
340 * feature is enabled only on EPYC. So, these constants are based on
341 * EPYC supported configurations. We may need to handle the cases if
342 * these values change in future.
344 /* Maximum core complexes in a node */
346 /* Maximum cores in a core complex */
347 #define MAX_CORES_IN_CCX 4
348 /* Maximum cores in a node */
349 #define MAX_CORES_IN_NODE 8
350 /* Maximum nodes in a socket */
351 #define MAX_NODES_PER_SOCKET 4
354 * Figure out the number of nodes required to build this config.
355 * Max cores in a node is 8
357 static int nodes_in_socket(int nr_cores)
361 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
363 /* Hardware does not support config with 3 nodes, return 4 in that case */
364 return (nodes == 3) ? 4 : nodes;
368 * Decide the number of cores in a core complex with the given nr_cores using
369 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
370 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
371 * L3 cache is shared across all cores in a core complex. So, this will also
372 * tell us how many cores are sharing the L3 cache.
374 static int cores_in_core_complex(int nr_cores)
378 /* Check if we can fit all the cores in one core complex */
379 if (nr_cores <= MAX_CORES_IN_CCX) {
382 /* Get the number of nodes required to build this config */
383 nodes = nodes_in_socket(nr_cores);
386 * Divide the cores accros all the core complexes
387 * Return rounded up value
389 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
392 /* Encode cache info for CPUID[8000001D] */
393 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
394 uint32_t *eax, uint32_t *ebx,
395 uint32_t *ecx, uint32_t *edx)
398 assert(cache->size == cache->line_size * cache->associativity *
399 cache->partitions * cache->sets);
401 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
402 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
404 /* L3 is shared among multiple cores */
405 if (cache->level == 3) {
406 l3_cores = cores_in_core_complex(cs->nr_cores);
407 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
409 *eax |= ((cs->nr_threads - 1) << 14);
412 assert(cache->line_size > 0);
413 assert(cache->partitions > 0);
414 assert(cache->associativity > 0);
415 /* We don't implement fully-associative caches */
416 assert(cache->associativity < cache->sets);
417 *ebx = (cache->line_size - 1) |
418 ((cache->partitions - 1) << 12) |
419 ((cache->associativity - 1) << 22);
421 assert(cache->sets > 0);
422 *ecx = cache->sets - 1;
424 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
425 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
426 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
429 /* Data structure to hold the configuration info for a given core index */
430 struct core_topology {
431 /* core complex id of the current core index */
434 * Adjusted core index for this core in the topology
435 * This can be 0,1,2,3 with max 4 cores in a core complex
438 /* Node id for this core index */
440 /* Number of nodes in this config */
445 * Build the configuration closely match the EPYC hardware. Using the EPYC
446 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
447 * right now. This could change in future.
448 * nr_cores : Total number of cores in the config
449 * core_id : Core index of the current CPU
450 * topo : Data structure to hold all the config info for this core index
452 static void build_core_topology(int nr_cores, int core_id,
453 struct core_topology *topo)
455 int nodes, cores_in_ccx;
457 /* First get the number of nodes required */
458 nodes = nodes_in_socket(nr_cores);
460 cores_in_ccx = cores_in_core_complex(nr_cores);
462 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
463 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
464 topo->core_id = core_id % cores_in_ccx;
465 topo->num_nodes = nodes;
468 /* Encode cache info for CPUID[8000001E] */
469 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
470 uint32_t *eax, uint32_t *ebx,
471 uint32_t *ecx, uint32_t *edx)
473 struct core_topology topo = {0};
477 build_core_topology(cs->nr_cores, cpu->core_id, &topo);
480 * CPUID_Fn8000001E_EBX
482 * 15:8 Threads per core (The number of threads per core is
483 * Threads per core + 1)
484 * 7:0 Core id (see bit decoding below)
494 if (cs->nr_threads - 1) {
495 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
496 (topo.ccx_id << 2) | topo.core_id;
498 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
501 * CPUID_Fn8000001E_ECX
503 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
504 * 7:0 Node id (see bit decoding below)
508 if (topo.num_nodes <= 4) {
509 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
513 * Node id fix up. Actual hardware supports up to 4 nodes. But with
514 * more than 32 cores, we may end up with more than 4 nodes.
515 * Node id is a combination of socket id and node id. Only requirement
516 * here is that this number should be unique accross the system.
517 * Shift the socket id to accommodate more nodes. We dont expect both
518 * socket id and node id to be big number at the same time. This is not
519 * an ideal config but we need to to support it. Max nodes we can have
520 * is 32 (255/8) with 8 cores per node and 255 max cores. We only need
521 * 5 bits for nodes. Find the left most set bit to represent the total
522 * number of nodes. find_last_bit returns last set bit(0 based). Left
523 * shift(+1) the socket id to represent all the nodes.
525 nodes = topo.num_nodes - 1;
526 shift = find_last_bit(&nodes, 8);
527 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
534 * Definitions of the hardcoded cache entries we expose:
535 * These are legacy cache values. If there is a need to change any
536 * of these values please use builtin_x86_defs
540 static CPUCacheInfo legacy_l1d_cache = {
549 .no_invd_sharing = true,
552 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
553 static CPUCacheInfo legacy_l1d_cache_amd = {
563 .no_invd_sharing = true,
566 /* L1 instruction cache: */
567 static CPUCacheInfo legacy_l1i_cache = {
576 .no_invd_sharing = true,
579 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
580 static CPUCacheInfo legacy_l1i_cache_amd = {
590 .no_invd_sharing = true,
593 /* Level 2 unified cache: */
594 static CPUCacheInfo legacy_l2_cache = {
595 .type = UNIFIED_CACHE,
603 .no_invd_sharing = true,
606 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
607 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
608 .type = UNIFIED_CACHE,
616 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
617 static CPUCacheInfo legacy_l2_cache_amd = {
618 .type = UNIFIED_CACHE,
628 /* Level 3 unified cache: */
629 static CPUCacheInfo legacy_l3_cache = {
630 .type = UNIFIED_CACHE,
640 .complex_indexing = true,
643 /* TLB definitions: */
645 #define L1_DTLB_2M_ASSOC 1
646 #define L1_DTLB_2M_ENTRIES 255
647 #define L1_DTLB_4K_ASSOC 1
648 #define L1_DTLB_4K_ENTRIES 255
650 #define L1_ITLB_2M_ASSOC 1
651 #define L1_ITLB_2M_ENTRIES 255
652 #define L1_ITLB_4K_ASSOC 1
653 #define L1_ITLB_4K_ENTRIES 255
655 #define L2_DTLB_2M_ASSOC 0 /* disabled */
656 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
657 #define L2_DTLB_4K_ASSOC 4
658 #define L2_DTLB_4K_ENTRIES 512
660 #define L2_ITLB_2M_ASSOC 0 /* disabled */
661 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
662 #define L2_ITLB_4K_ASSOC 4
663 #define L2_ITLB_4K_ENTRIES 512
665 /* CPUID Leaf 0x14 constants: */
666 #define INTEL_PT_MAX_SUBLEAF 0x1
668 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
669 * MSR can be accessed;
670 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
671 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
672 * of Intel PT MSRs across warm reset;
673 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
675 #define INTEL_PT_MINIMAL_EBX 0xf
677 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
678 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
680 * bit[01]: ToPA tables can hold any number of output entries, up to the
681 * maximum allowed by the MaskOrTableOffset field of
682 * IA32_RTIT_OUTPUT_MASK_PTRS;
683 * bit[02]: Support Single-Range Output scheme;
685 #define INTEL_PT_MINIMAL_ECX 0x7
686 /* generated packets which contain IP payloads have LIP values */
687 #define INTEL_PT_IP_LIP (1 << 31)
688 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
689 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
690 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
691 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
692 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
694 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
695 uint32_t vendor2, uint32_t vendor3)
698 for (i = 0; i < 4; i++) {
699 dst[i] = vendor1 >> (8 * i);
700 dst[i + 4] = vendor2 >> (8 * i);
701 dst[i + 8] = vendor3 >> (8 * i);
703 dst[CPUID_VENDOR_SZ] = '\0';
706 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
707 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
708 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
709 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
710 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
711 CPUID_PSE36 | CPUID_FXSR)
712 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
713 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
714 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
715 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
716 CPUID_PAE | CPUID_SEP | CPUID_APIC)
718 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
719 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
720 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
721 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
722 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
723 /* partly implemented:
724 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
726 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
727 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
728 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
729 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
730 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
731 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
733 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
734 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
735 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
736 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
737 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
740 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
742 #define TCG_EXT2_X86_64_FEATURES 0
745 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
746 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
747 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
748 TCG_EXT2_X86_64_FEATURES)
749 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
750 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
751 #define TCG_EXT4_FEATURES 0
752 #define TCG_SVM_FEATURES CPUID_SVM_NPT
753 #define TCG_KVM_FEATURES 0
754 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
755 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
756 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
757 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
760 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
761 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
762 CPUID_7_0_EBX_RDSEED */
763 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
764 /* CPUID_7_0_ECX_OSPKE is dynamic */ \
766 #define TCG_7_0_EDX_FEATURES 0
767 #define TCG_APM_FEATURES 0
768 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
769 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
771 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
773 typedef struct FeatureWordInfo {
774 /* feature flags names are taken from "Intel Processor Identification and
775 * the CPUID Instruction" and AMD's "CPUID Specification".
776 * In cases of disagreement between feature naming conventions,
777 * aliases may be added.
779 const char *feat_names[32];
780 uint32_t cpuid_eax; /* Input EAX for CPUID */
781 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
782 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
783 int cpuid_reg; /* output register (R_* constant) */
784 uint32_t tcg_features; /* Feature flags supported by TCG */
785 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
786 uint32_t migratable_flags; /* Feature flags known to be migratable */
787 /* Features that shouldn't be auto-enabled by "-cpu host" */
788 uint32_t no_autoenable_flags;
791 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
794 "fpu", "vme", "de", "pse",
795 "tsc", "msr", "pae", "mce",
796 "cx8", "apic", NULL, "sep",
797 "mtrr", "pge", "mca", "cmov",
798 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
799 NULL, "ds" /* Intel dts */, "acpi", "mmx",
800 "fxsr", "sse", "sse2", "ss",
801 "ht" /* Intel htt */, "tm", "ia64", "pbe",
803 .cpuid_eax = 1, .cpuid_reg = R_EDX,
804 .tcg_features = TCG_FEATURES,
808 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
809 "ds-cpl", "vmx", "smx", "est",
810 "tm2", "ssse3", "cid", NULL,
811 "fma", "cx16", "xtpr", "pdcm",
812 NULL, "pcid", "dca", "sse4.1",
813 "sse4.2", "x2apic", "movbe", "popcnt",
814 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
815 "avx", "f16c", "rdrand", "hypervisor",
817 .cpuid_eax = 1, .cpuid_reg = R_ECX,
818 .tcg_features = TCG_EXT_FEATURES,
820 /* Feature names that are already defined on feature_name[] but
821 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
822 * names on feat_names below. They are copied automatically
823 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
825 [FEAT_8000_0001_EDX] = {
827 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
828 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
829 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
830 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
831 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
832 "nx", NULL, "mmxext", NULL /* mmx */,
833 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
834 NULL, "lm", "3dnowext", "3dnow",
836 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
837 .tcg_features = TCG_EXT2_FEATURES,
839 [FEAT_8000_0001_ECX] = {
841 "lahf-lm", "cmp-legacy", "svm", "extapic",
842 "cr8legacy", "abm", "sse4a", "misalignsse",
843 "3dnowprefetch", "osvw", "ibs", "xop",
844 "skinit", "wdt", NULL, "lwp",
845 "fma4", "tce", NULL, "nodeid-msr",
846 NULL, "tbm", "topoext", "perfctr-core",
847 "perfctr-nb", NULL, NULL, NULL,
848 NULL, NULL, NULL, NULL,
850 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
851 .tcg_features = TCG_EXT3_FEATURES,
853 [FEAT_C000_0001_EDX] = {
855 NULL, NULL, "xstore", "xstore-en",
856 NULL, NULL, "xcrypt", "xcrypt-en",
857 "ace2", "ace2-en", "phe", "phe-en",
858 "pmm", "pmm-en", NULL, NULL,
859 NULL, NULL, NULL, NULL,
860 NULL, NULL, NULL, NULL,
861 NULL, NULL, NULL, NULL,
862 NULL, NULL, NULL, NULL,
864 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
865 .tcg_features = TCG_EXT4_FEATURES,
869 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
870 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
871 NULL, "kvm-pv-tlb-flush", NULL, NULL,
872 NULL, NULL, NULL, NULL,
873 NULL, NULL, NULL, NULL,
874 NULL, NULL, NULL, NULL,
875 "kvmclock-stable-bit", NULL, NULL, NULL,
876 NULL, NULL, NULL, NULL,
878 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
879 .tcg_features = TCG_KVM_FEATURES,
883 "kvm-hint-dedicated", NULL, NULL, NULL,
884 NULL, NULL, NULL, NULL,
885 NULL, NULL, NULL, NULL,
886 NULL, NULL, NULL, NULL,
887 NULL, NULL, NULL, NULL,
888 NULL, NULL, NULL, NULL,
889 NULL, NULL, NULL, NULL,
890 NULL, NULL, NULL, NULL,
892 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
893 .tcg_features = TCG_KVM_FEATURES,
895 * KVM hints aren't auto-enabled by -cpu host, they need to be
896 * explicitly enabled in the command-line.
898 .no_autoenable_flags = ~0U,
900 [FEAT_HYPERV_EAX] = {
902 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
903 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
904 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
905 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
906 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
907 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
908 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
910 NULL, NULL, NULL, NULL,
911 NULL, NULL, NULL, NULL,
912 NULL, NULL, NULL, NULL,
913 NULL, NULL, NULL, NULL,
915 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
917 [FEAT_HYPERV_EBX] = {
919 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
920 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
921 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
922 NULL /* hv_create_port */, NULL /* hv_connect_port */,
923 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
924 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
926 NULL, NULL, NULL, NULL,
927 NULL, NULL, NULL, NULL,
928 NULL, NULL, NULL, NULL,
929 NULL, NULL, NULL, NULL,
931 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
933 [FEAT_HYPERV_EDX] = {
935 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
936 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
937 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
939 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
940 NULL, NULL, NULL, NULL,
941 NULL, NULL, NULL, NULL,
942 NULL, NULL, NULL, NULL,
943 NULL, NULL, NULL, NULL,
944 NULL, NULL, NULL, NULL,
946 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
950 "npt", "lbrv", "svm-lock", "nrip-save",
951 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
952 NULL, NULL, "pause-filter", NULL,
953 "pfthreshold", NULL, NULL, NULL,
954 NULL, NULL, NULL, NULL,
955 NULL, NULL, NULL, NULL,
956 NULL, NULL, NULL, NULL,
957 NULL, NULL, NULL, NULL,
959 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
960 .tcg_features = TCG_SVM_FEATURES,
964 "fsgsbase", "tsc-adjust", NULL, "bmi1",
965 "hle", "avx2", NULL, "smep",
966 "bmi2", "erms", "invpcid", "rtm",
967 NULL, NULL, "mpx", NULL,
968 "avx512f", "avx512dq", "rdseed", "adx",
969 "smap", "avx512ifma", "pcommit", "clflushopt",
970 "clwb", "intel-pt", "avx512pf", "avx512er",
971 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
974 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
976 .tcg_features = TCG_7_0_EBX_FEATURES,
980 NULL, "avx512vbmi", "umip", "pku",
981 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
982 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
983 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
984 "la57", NULL, NULL, NULL,
985 NULL, NULL, "rdpid", NULL,
986 NULL, "cldemote", NULL, NULL,
987 NULL, NULL, NULL, NULL,
990 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
992 .tcg_features = TCG_7_0_ECX_FEATURES,
996 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
997 NULL, NULL, NULL, NULL,
998 NULL, NULL, NULL, NULL,
999 NULL, NULL, NULL, NULL,
1000 NULL, NULL, NULL, NULL,
1001 NULL, NULL, NULL, NULL,
1002 NULL, NULL, "spec-ctrl", NULL,
1003 NULL, NULL, NULL, "ssbd",
1006 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1008 .tcg_features = TCG_7_0_EDX_FEATURES,
1010 [FEAT_8000_0007_EDX] = {
1012 NULL, NULL, NULL, NULL,
1013 NULL, NULL, NULL, NULL,
1014 "invtsc", NULL, NULL, NULL,
1015 NULL, NULL, NULL, NULL,
1016 NULL, NULL, NULL, NULL,
1017 NULL, NULL, NULL, NULL,
1018 NULL, NULL, NULL, NULL,
1019 NULL, NULL, NULL, NULL,
1021 .cpuid_eax = 0x80000007,
1023 .tcg_features = TCG_APM_FEATURES,
1024 .unmigratable_flags = CPUID_APM_INVTSC,
1026 [FEAT_8000_0008_EBX] = {
1028 NULL, NULL, NULL, NULL,
1029 NULL, NULL, NULL, NULL,
1030 NULL, NULL, NULL, NULL,
1031 "ibpb", NULL, NULL, NULL,
1032 NULL, NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL,
1034 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1035 NULL, NULL, NULL, NULL,
1037 .cpuid_eax = 0x80000008,
1040 .unmigratable_flags = 0,
1044 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1045 NULL, NULL, NULL, NULL,
1046 NULL, NULL, NULL, NULL,
1047 NULL, NULL, NULL, NULL,
1048 NULL, NULL, NULL, NULL,
1049 NULL, NULL, NULL, NULL,
1050 NULL, NULL, NULL, NULL,
1051 NULL, NULL, NULL, NULL,
1054 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
1056 .tcg_features = TCG_XSAVE_FEATURES,
1060 NULL, NULL, "arat", NULL,
1061 NULL, NULL, NULL, NULL,
1062 NULL, NULL, NULL, NULL,
1063 NULL, NULL, NULL, NULL,
1064 NULL, NULL, NULL, NULL,
1065 NULL, NULL, NULL, NULL,
1066 NULL, NULL, NULL, NULL,
1067 NULL, NULL, NULL, NULL,
1069 .cpuid_eax = 6, .cpuid_reg = R_EAX,
1070 .tcg_features = TCG_6_EAX_FEATURES,
1072 [FEAT_XSAVE_COMP_LO] = {
1074 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1076 .tcg_features = ~0U,
1077 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1078 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1079 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1082 [FEAT_XSAVE_COMP_HI] = {
1084 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1086 .tcg_features = ~0U,
/*
 * Per-register metadata for the 32-bit x86 GPRs, used to translate a
 * register index (R_EAX, R_EBX, ...) into a printable name and the
 * corresponding QAPI X86CPURegister32 enum value.
 * NOTE(review): the embedded original line numbers jump 1091 -> 1093,
 * so the `name` member declaration is not visible in this extract.
 */
1090 typedef struct X86RegisterInfo32 {
1091 /* Name of register */
1093 /* QAPI enum value register */
1094 X86CPURegister32 qapi_enum;
1095 } X86RegisterInfo32;
/* Build one table entry from a register token, e.g. REGISTER(EAX). */
1097 #define REGISTER(reg) \
1098 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
/* NOTE(review): the table entries (original lines 1100-1109) are elided
 * from this extract; presumably one REGISTER(...) per GPR — confirm. */
1099 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
/*
 * Description of one XSAVE state component: the CPUID feature word/bit
 * that enables it, plus its offset and size inside the XSAVE area.
 * NOTE(review): the closing of the typedef and several array lines are
 * elided from this extract (embedded line numbers are non-contiguous).
 */
1111 typedef struct ExtSaveArea {
1112 uint32_t feature, bits;
1113 uint32_t offset, size;
/*
 * Table of XSAVE state components, indexed by XSTATE_*_BIT.  Each entry
 * ties a component to the feature flag that advertises it and to its
 * location in X86XSaveArea.
 */
1116 static const ExtSaveArea x86_ext_save_areas[] = {
1118 /* x87 FP state component is always enabled if XSAVE is supported */
1119 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1120 /* x87 state is in the legacy region of the XSAVE area */
1122 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1124 [XSTATE_SSE_BIT] = {
1125 /* SSE state component is always enabled if XSAVE is supported */
1126 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1127 /* SSE state is in the legacy region of the XSAVE area */
1129 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
/* AVX (YMM) state, gated on the AVX feature bit in CPUID.1:ECX. */
1132 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1133 .offset = offsetof(X86XSaveArea, avx_state),
1134 .size = sizeof(XSaveAVX) },
/* MPX bound registers and config/status, gated on CPUID.7.0:EBX MPX. */
1135 [XSTATE_BNDREGS_BIT] =
1136 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1137 .offset = offsetof(X86XSaveArea, bndreg_state),
1138 .size = sizeof(XSaveBNDREG) },
1139 [XSTATE_BNDCSR_BIT] =
1140 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1141 .offset = offsetof(X86XSaveArea, bndcsr_state),
1142 .size = sizeof(XSaveBNDCSR) },
/* AVX-512 opmask, upper ZMM halves and high ZMM registers, all gated
 * on the AVX512F feature bit. */
1143 [XSTATE_OPMASK_BIT] =
1144 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1145 .offset = offsetof(X86XSaveArea, opmask_state),
1146 .size = sizeof(XSaveOpmask) },
1147 [XSTATE_ZMM_Hi256_BIT] =
1148 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1149 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1150 .size = sizeof(XSaveZMM_Hi256) },
1151 [XSTATE_Hi16_ZMM_BIT] =
1152 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1153 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1154 .size = sizeof(XSaveHi16_ZMM) },
/* Protection-key rights register state, gated on CPUID.7.0:ECX PKU. */
1156 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1157 .offset = offsetof(X86XSaveArea, pkru_state),
1158 .size = sizeof(XSavePKRU) },
/*
 * Return the size in bytes of the XSAVE area needed for the state
 * components selected in @mask: for every set bit i, the end offset
 * (offset + size) of x86_ext_save_areas[i] is folded into a running
 * maximum.
 * NOTE(review): the embedded original line numbers jump 1161 -> 1166,
 * so the declarations of `ret` and `i` and the final return statement
 * are not visible in this extract.
 */
1161 static uint32_t xsave_area_size(uint64_t mask)
1166 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1167 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1168 if ((mask >> i) & 1) {
1169 ret = MAX(ret, esa->offset + esa->size);
/*
 * True when the active accelerator (KVM or HVF) derives guest CPUID
 * data from the host CPU, as opposed to TCG's fully synthetic CPUID.
 */
1175 static inline bool accel_uses_host_cpuid(void)
1177 return kvm_enabled() || hvf_enabled();
/*
 * Combine the two 32-bit XSAVE component feature words into the single
 * 64-bit component bitmap (FEAT_XSAVE_COMP_HI in the upper half,
 * FEAT_XSAVE_COMP_LO in the lower half).
 */
1180 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1182 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1183 cpu->env.features[FEAT_XSAVE_COMP_LO];
/*
 * Return the printable name of 32-bit GPR @reg from x86_reg_info_32,
 * after bounds-checking @reg against CPU_NB_REGS32.
 * NOTE(review): the body of the out-of-range branch (original lines
 * 1189-1190) is elided from this extract — presumably it returns NULL;
 * confirm against the full file.
 */
1186 const char *get_register_name_32(unsigned int reg)
1188 if (reg >= CPU_NB_REGS32) {
1191 return x86_reg_info_32[reg].name;
/*
 * Scans all 32 bits of feature word @w: a bit is treated as migratable
 * when it is listed in migratable_flags, or when it has a known feature
 * name and is not listed in unmigratable_flags.
 * NOTE(review): the embedded original line numbers show gaps (1200 ->
 * 1204, and after 1210), so the result accumulator and the return
 * statement are not visible in this extract.
 */
1195 * Returns the set of feature flags that are supported and migratable by
1196 * QEMU, for a given FeatureWord.
1198 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1200 FeatureWordInfo *wi = &feature_word_info[w];
1204 for (i = 0; i < 32; i++) {
1205 uint32_t f = 1U << i;
1207 /* If the feature name is known, it is implicitly considered migratable,
1208 * unless it is explicitly set in unmigratable_flags */
1209 if ((wi->migratable_flags & f) ||
1210 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
/*
 * Execute the CPUID instruction on the HOST CPU for leaf @function and
 * subleaf @count, returning EAX/EBX/ECX/EDX through the out pointers.
 * On x86-64 the registers are captured directly via asm output
 * constraints; on i386 the register file is saved/restored with
 * pusha/popa and the results are stored through the vec pointer.
 * NOTE(review): this extract elides several original lines (1219-1222,
 * 1229, 1234, 1236 onward) — the `vec` declaration, the x86-64 #ifdef,
 * the i386 "cpuid"/"popa" instructions, and the copy-out to the out
 * parameters are not visible here.
 */
1217 void host_cpuid(uint32_t function, uint32_t count,
1218 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1223 asm volatile("cpuid"
1224 : "=a"(vec[0]), "=b"(vec[1]),
1225 "=c"(vec[2]), "=d"(vec[3])
1226 : "0"(function), "c"(count) : "cc");
1227 #elif defined(__i386__)
1228 asm volatile("pusha \n\t"
1230 "mov %%eax, 0(%2) \n\t"
1231 "mov %%ebx, 4(%2) \n\t"
1232 "mov %%ecx, 8(%2) \n\t"
1233 "mov %%edx, 12(%2) \n\t"
1235 : : "a"(function), "c"(count), "S"(vec)
/*
 * Query the HOST CPU's vendor string and family/model/stepping.
 * Leaf 0 supplies the vendor string in EBX/EDX/ECX order; leaf 1's EAX
 * encodes family (base family plus extended family), model (base model
 * OR'd with extended model shifted into the high nibble) and stepping.
 * NOTE(review): the embedded line numbers show gaps (1259, 1261-1262,
 * 1264-1265 elided) — presumably NULL-checks guard each out parameter;
 * confirm against the full file.
 */
1251 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1253 uint32_t eax, ebx, ecx, edx;
1255 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1256 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1258 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1260 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1263 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1266 *stepping = eax & 0x0F;
1270 /* CPU class name definitions: */
1272 /* Return type name for a given CPU model name
1273 * Caller is responsible for freeing the returned string.
/* Maps a CPU model name onto its QOM type name via X86_CPU_TYPE_NAME;
 * the returned string is allocated with g_strdup_printf(). */
1275 static char *x86_cpu_type_name(const char *model_name)
1277 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
/*
 * Resolve a CPU model name to its QOM ObjectClass: build the full type
 * name with x86_cpu_type_name() and look it up in the class table.
 * NOTE(review): the embedded line numbers show gaps (1281-1282 and
 * 1285 onward elided) — the `oc` declaration, the g_free of typename
 * and the return are not visible in this extract.
 */
1280 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1283 char *typename = x86_cpu_type_name(cpu_model);
1284 oc = object_class_by_name(typename);
/*
 * Inverse of x86_cpu_type_name(): recover the model name from a CPU
 * class by stripping the X86_CPU_TYPE_SUFFIX from the QOM class name.
 * The suffix is asserted to be present.  Returns a newly allocated
 * string (g_strndup); the caller must free it.
 */
1289 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1291 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1292 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1293 return g_strndup(class_name,
1294 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
/*
 * Static description of a built-in CPU model: vendor string, the full
 * per-FeatureWord CPUID feature set, a human-readable model id string
 * and optional cache topology information.
 * NOTE(review): the embedded line numbers show gaps (1298-1300 and
 * 1303-1305 elided) — additional members (presumably name, level,
 * family/model/stepping, xlevel) are not visible in this extract.
 */
1297 struct X86CPUDefinition {
1301 /* vendor is zero-terminated, 12 character ASCII string */
1302 char vendor[CPUID_VENDOR_SZ + 1];
1306 FeatureWordArray features;
1307 const char *model_id;
1308 CPUCaches *cache_info;
/*
 * Cache hierarchy advertised by the EPYC CPU model: separate L1 data
 * and instruction caches, a unified L2 and a unified L3.
 * NOTE(review): most numeric cache parameters (size, line size, sets,
 * associativity, sharing) fall on lines elided from this extract —
 * only the type/associativity/indexing fields below are visible.
 */
1311 static CPUCaches epyc_cache_info = {
1312 .l1d_cache = &(CPUCacheInfo) {
1322 .no_invd_sharing = true,
1324 .l1i_cache = &(CPUCacheInfo) {
1334 .no_invd_sharing = true,
1336 .l2_cache = &(CPUCacheInfo) {
1337 .type = UNIFIED_CACHE,
1346 .l3_cache = &(CPUCacheInfo) {
1347 .type = UNIFIED_CACHE,
1351 .associativity = 16,
1357 .complex_indexing = true,
1361 static X86CPUDefinition builtin_x86_defs[] = {
1365 .vendor = CPUID_VENDOR_AMD,
1369 .features[FEAT_1_EDX] =
1371 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1373 .features[FEAT_1_ECX] =
1374 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1375 .features[FEAT_8000_0001_EDX] =
1376 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1377 .features[FEAT_8000_0001_ECX] =
1378 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1379 .xlevel = 0x8000000A,
1380 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1385 .vendor = CPUID_VENDOR_AMD,
1389 /* Missing: CPUID_HT */
1390 .features[FEAT_1_EDX] =
1392 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1393 CPUID_PSE36 | CPUID_VME,
1394 .features[FEAT_1_ECX] =
1395 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1397 .features[FEAT_8000_0001_EDX] =
1398 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1399 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1400 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1401 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1403 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1404 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1405 .features[FEAT_8000_0001_ECX] =
1406 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1407 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1408 /* Missing: CPUID_SVM_LBRV */
1409 .features[FEAT_SVM] =
1411 .xlevel = 0x8000001A,
1412 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1417 .vendor = CPUID_VENDOR_INTEL,
1421 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1422 .features[FEAT_1_EDX] =
1424 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1425 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1426 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1427 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1428 .features[FEAT_1_ECX] =
1429 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1431 .features[FEAT_8000_0001_EDX] =
1432 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1433 .features[FEAT_8000_0001_ECX] =
1435 .xlevel = 0x80000008,
1436 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1441 .vendor = CPUID_VENDOR_INTEL,
1445 /* Missing: CPUID_HT */
1446 .features[FEAT_1_EDX] =
1447 PPRO_FEATURES | CPUID_VME |
1448 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1450 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1451 .features[FEAT_1_ECX] =
1452 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1453 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1454 .features[FEAT_8000_0001_EDX] =
1455 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1456 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1457 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1458 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1459 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1460 .features[FEAT_8000_0001_ECX] =
1462 .xlevel = 0x80000008,
1463 .model_id = "Common KVM processor"
1468 .vendor = CPUID_VENDOR_INTEL,
1472 .features[FEAT_1_EDX] =
1474 .features[FEAT_1_ECX] =
1476 .xlevel = 0x80000004,
1477 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1482 .vendor = CPUID_VENDOR_INTEL,
1486 .features[FEAT_1_EDX] =
1487 PPRO_FEATURES | CPUID_VME |
1488 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1489 .features[FEAT_1_ECX] =
1491 .features[FEAT_8000_0001_ECX] =
1493 .xlevel = 0x80000008,
1494 .model_id = "Common 32-bit KVM processor"
1499 .vendor = CPUID_VENDOR_INTEL,
1503 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1504 .features[FEAT_1_EDX] =
1505 PPRO_FEATURES | CPUID_VME |
1506 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1508 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1509 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1510 .features[FEAT_1_ECX] =
1511 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1512 .features[FEAT_8000_0001_EDX] =
1514 .xlevel = 0x80000008,
1515 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1520 .vendor = CPUID_VENDOR_INTEL,
1524 .features[FEAT_1_EDX] =
1532 .vendor = CPUID_VENDOR_INTEL,
1536 .features[FEAT_1_EDX] =
1544 .vendor = CPUID_VENDOR_INTEL,
1548 .features[FEAT_1_EDX] =
1556 .vendor = CPUID_VENDOR_INTEL,
1560 .features[FEAT_1_EDX] =
1568 .vendor = CPUID_VENDOR_AMD,
1572 .features[FEAT_1_EDX] =
1573 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1575 .features[FEAT_8000_0001_EDX] =
1576 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1577 .xlevel = 0x80000008,
1578 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1583 .vendor = CPUID_VENDOR_INTEL,
1587 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1588 .features[FEAT_1_EDX] =
1590 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1591 CPUID_ACPI | CPUID_SS,
1592 /* Some CPUs got no CPUID_SEP */
1593 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1595 .features[FEAT_1_ECX] =
1596 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1598 .features[FEAT_8000_0001_EDX] =
1600 .features[FEAT_8000_0001_ECX] =
1602 .xlevel = 0x80000008,
1603 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1608 .vendor = CPUID_VENDOR_INTEL,
1612 .features[FEAT_1_EDX] =
1613 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1614 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1615 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1616 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1617 CPUID_DE | CPUID_FP87,
1618 .features[FEAT_1_ECX] =
1619 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1620 .features[FEAT_8000_0001_EDX] =
1621 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1622 .features[FEAT_8000_0001_ECX] =
1624 .xlevel = 0x80000008,
1625 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1630 .vendor = CPUID_VENDOR_INTEL,
1634 .features[FEAT_1_EDX] =
1635 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1636 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1637 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1638 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1639 CPUID_DE | CPUID_FP87,
1640 .features[FEAT_1_ECX] =
1641 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1643 .features[FEAT_8000_0001_EDX] =
1644 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1645 .features[FEAT_8000_0001_ECX] =
1647 .xlevel = 0x80000008,
1648 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1653 .vendor = CPUID_VENDOR_INTEL,
1657 .features[FEAT_1_EDX] =
1658 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1659 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1660 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1661 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1662 CPUID_DE | CPUID_FP87,
1663 .features[FEAT_1_ECX] =
1664 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1665 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1666 .features[FEAT_8000_0001_EDX] =
1667 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1668 .features[FEAT_8000_0001_ECX] =
1670 .xlevel = 0x80000008,
1671 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1674 .name = "Nehalem-IBRS",
1676 .vendor = CPUID_VENDOR_INTEL,
1680 .features[FEAT_1_EDX] =
1681 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1682 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1683 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1684 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1685 CPUID_DE | CPUID_FP87,
1686 .features[FEAT_1_ECX] =
1687 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1688 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1689 .features[FEAT_7_0_EDX] =
1690 CPUID_7_0_EDX_SPEC_CTRL,
1691 .features[FEAT_8000_0001_EDX] =
1692 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1693 .features[FEAT_8000_0001_ECX] =
1695 .xlevel = 0x80000008,
1696 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1701 .vendor = CPUID_VENDOR_INTEL,
1705 .features[FEAT_1_EDX] =
1706 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1707 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1708 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1709 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1710 CPUID_DE | CPUID_FP87,
1711 .features[FEAT_1_ECX] =
1712 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1713 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1714 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1715 .features[FEAT_8000_0001_EDX] =
1716 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1717 .features[FEAT_8000_0001_ECX] =
1719 .features[FEAT_6_EAX] =
1721 .xlevel = 0x80000008,
1722 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1725 .name = "Westmere-IBRS",
1727 .vendor = CPUID_VENDOR_INTEL,
1731 .features[FEAT_1_EDX] =
1732 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1733 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1734 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1735 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1736 CPUID_DE | CPUID_FP87,
1737 .features[FEAT_1_ECX] =
1738 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1739 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1740 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1741 .features[FEAT_8000_0001_EDX] =
1742 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1743 .features[FEAT_8000_0001_ECX] =
1745 .features[FEAT_7_0_EDX] =
1746 CPUID_7_0_EDX_SPEC_CTRL,
1747 .features[FEAT_6_EAX] =
1749 .xlevel = 0x80000008,
1750 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1753 .name = "SandyBridge",
1755 .vendor = CPUID_VENDOR_INTEL,
1759 .features[FEAT_1_EDX] =
1760 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1761 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1762 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1763 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1764 CPUID_DE | CPUID_FP87,
1765 .features[FEAT_1_ECX] =
1766 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1767 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1768 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1769 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1771 .features[FEAT_8000_0001_EDX] =
1772 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1774 .features[FEAT_8000_0001_ECX] =
1776 .features[FEAT_XSAVE] =
1777 CPUID_XSAVE_XSAVEOPT,
1778 .features[FEAT_6_EAX] =
1780 .xlevel = 0x80000008,
1781 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1784 .name = "SandyBridge-IBRS",
1786 .vendor = CPUID_VENDOR_INTEL,
1790 .features[FEAT_1_EDX] =
1791 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1792 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1793 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1794 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1795 CPUID_DE | CPUID_FP87,
1796 .features[FEAT_1_ECX] =
1797 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1798 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1799 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1800 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1802 .features[FEAT_8000_0001_EDX] =
1803 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1805 .features[FEAT_8000_0001_ECX] =
1807 .features[FEAT_7_0_EDX] =
1808 CPUID_7_0_EDX_SPEC_CTRL,
1809 .features[FEAT_XSAVE] =
1810 CPUID_XSAVE_XSAVEOPT,
1811 .features[FEAT_6_EAX] =
1813 .xlevel = 0x80000008,
1814 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1817 .name = "IvyBridge",
1819 .vendor = CPUID_VENDOR_INTEL,
1823 .features[FEAT_1_EDX] =
1824 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1825 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1826 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1827 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1828 CPUID_DE | CPUID_FP87,
1829 .features[FEAT_1_ECX] =
1830 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1831 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1832 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1833 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1834 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1835 .features[FEAT_7_0_EBX] =
1836 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1838 .features[FEAT_8000_0001_EDX] =
1839 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1841 .features[FEAT_8000_0001_ECX] =
1843 .features[FEAT_XSAVE] =
1844 CPUID_XSAVE_XSAVEOPT,
1845 .features[FEAT_6_EAX] =
1847 .xlevel = 0x80000008,
1848 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1851 .name = "IvyBridge-IBRS",
1853 .vendor = CPUID_VENDOR_INTEL,
1857 .features[FEAT_1_EDX] =
1858 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1859 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1860 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1861 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1862 CPUID_DE | CPUID_FP87,
1863 .features[FEAT_1_ECX] =
1864 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1865 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1866 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1867 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1868 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1869 .features[FEAT_7_0_EBX] =
1870 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1872 .features[FEAT_8000_0001_EDX] =
1873 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1875 .features[FEAT_8000_0001_ECX] =
1877 .features[FEAT_7_0_EDX] =
1878 CPUID_7_0_EDX_SPEC_CTRL,
1879 .features[FEAT_XSAVE] =
1880 CPUID_XSAVE_XSAVEOPT,
1881 .features[FEAT_6_EAX] =
1883 .xlevel = 0x80000008,
1884 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1887 .name = "Haswell-noTSX",
1889 .vendor = CPUID_VENDOR_INTEL,
1893 .features[FEAT_1_EDX] =
1894 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1895 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1896 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1897 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1898 CPUID_DE | CPUID_FP87,
1899 .features[FEAT_1_ECX] =
1900 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1901 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1902 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1903 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1904 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1905 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1906 .features[FEAT_8000_0001_EDX] =
1907 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1909 .features[FEAT_8000_0001_ECX] =
1910 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1911 .features[FEAT_7_0_EBX] =
1912 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1913 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1914 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1915 .features[FEAT_XSAVE] =
1916 CPUID_XSAVE_XSAVEOPT,
1917 .features[FEAT_6_EAX] =
1919 .xlevel = 0x80000008,
1920 .model_id = "Intel Core Processor (Haswell, no TSX)",
1923 .name = "Haswell-noTSX-IBRS",
1925 .vendor = CPUID_VENDOR_INTEL,
1929 .features[FEAT_1_EDX] =
1930 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1931 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1932 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1933 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1934 CPUID_DE | CPUID_FP87,
1935 .features[FEAT_1_ECX] =
1936 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1937 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1938 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1939 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1940 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1941 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1942 .features[FEAT_8000_0001_EDX] =
1943 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1945 .features[FEAT_8000_0001_ECX] =
1946 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1947 .features[FEAT_7_0_EDX] =
1948 CPUID_7_0_EDX_SPEC_CTRL,
1949 .features[FEAT_7_0_EBX] =
1950 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1951 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1952 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1953 .features[FEAT_XSAVE] =
1954 CPUID_XSAVE_XSAVEOPT,
1955 .features[FEAT_6_EAX] =
1957 .xlevel = 0x80000008,
1958 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1963 .vendor = CPUID_VENDOR_INTEL,
1967 .features[FEAT_1_EDX] =
1968 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1969 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1970 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1971 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1972 CPUID_DE | CPUID_FP87,
1973 .features[FEAT_1_ECX] =
1974 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1975 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1976 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1977 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1978 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1979 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1980 .features[FEAT_8000_0001_EDX] =
1981 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1983 .features[FEAT_8000_0001_ECX] =
1984 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1985 .features[FEAT_7_0_EBX] =
1986 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1987 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1988 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1990 .features[FEAT_XSAVE] =
1991 CPUID_XSAVE_XSAVEOPT,
1992 .features[FEAT_6_EAX] =
1994 .xlevel = 0x80000008,
1995 .model_id = "Intel Core Processor (Haswell)",
1998 .name = "Haswell-IBRS",
2000 .vendor = CPUID_VENDOR_INTEL,
2004 .features[FEAT_1_EDX] =
2005 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2006 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2007 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2008 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2009 CPUID_DE | CPUID_FP87,
2010 .features[FEAT_1_ECX] =
2011 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2012 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2013 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2014 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2015 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2016 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2017 .features[FEAT_8000_0001_EDX] =
2018 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2020 .features[FEAT_8000_0001_ECX] =
2021 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2022 .features[FEAT_7_0_EDX] =
2023 CPUID_7_0_EDX_SPEC_CTRL,
2024 .features[FEAT_7_0_EBX] =
2025 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2026 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2027 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2029 .features[FEAT_XSAVE] =
2030 CPUID_XSAVE_XSAVEOPT,
2031 .features[FEAT_6_EAX] =
2033 .xlevel = 0x80000008,
2034 .model_id = "Intel Core Processor (Haswell, IBRS)",
2037 .name = "Broadwell-noTSX",
2039 .vendor = CPUID_VENDOR_INTEL,
2043 .features[FEAT_1_EDX] =
2044 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2045 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2046 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2047 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2048 CPUID_DE | CPUID_FP87,
2049 .features[FEAT_1_ECX] =
2050 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2051 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2052 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2053 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2054 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2055 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2056 .features[FEAT_8000_0001_EDX] =
2057 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2059 .features[FEAT_8000_0001_ECX] =
2060 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2061 .features[FEAT_7_0_EBX] =
2062 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2063 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2064 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2065 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2067 .features[FEAT_XSAVE] =
2068 CPUID_XSAVE_XSAVEOPT,
2069 .features[FEAT_6_EAX] =
2071 .xlevel = 0x80000008,
2072 .model_id = "Intel Core Processor (Broadwell, no TSX)",
2075 .name = "Broadwell-noTSX-IBRS",
2077 .vendor = CPUID_VENDOR_INTEL,
2081 .features[FEAT_1_EDX] =
2082 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2083 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2084 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2085 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2086 CPUID_DE | CPUID_FP87,
2087 .features[FEAT_1_ECX] =
2088 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2089 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2090 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2091 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2092 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2093 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2094 .features[FEAT_8000_0001_EDX] =
2095 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2097 .features[FEAT_8000_0001_ECX] =
2098 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2099 .features[FEAT_7_0_EDX] =
2100 CPUID_7_0_EDX_SPEC_CTRL,
2101 .features[FEAT_7_0_EBX] =
2102 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2103 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2104 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2105 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2107 .features[FEAT_XSAVE] =
2108 CPUID_XSAVE_XSAVEOPT,
2109 .features[FEAT_6_EAX] =
2111 .xlevel = 0x80000008,
2112 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
2115 .name = "Broadwell",
2117 .vendor = CPUID_VENDOR_INTEL,
2121 .features[FEAT_1_EDX] =
2122 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2123 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2124 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2125 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2126 CPUID_DE | CPUID_FP87,
2127 .features[FEAT_1_ECX] =
2128 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2129 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2130 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2131 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2132 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2133 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2134 .features[FEAT_8000_0001_EDX] =
2135 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2137 .features[FEAT_8000_0001_ECX] =
2138 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2139 .features[FEAT_7_0_EBX] =
2140 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2141 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2142 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2143 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2145 .features[FEAT_XSAVE] =
2146 CPUID_XSAVE_XSAVEOPT,
2147 .features[FEAT_6_EAX] =
2149 .xlevel = 0x80000008,
2150 .model_id = "Intel Core Processor (Broadwell)",
2153 .name = "Broadwell-IBRS",
2155 .vendor = CPUID_VENDOR_INTEL,
2159 .features[FEAT_1_EDX] =
2160 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2161 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2162 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2163 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2164 CPUID_DE | CPUID_FP87,
2165 .features[FEAT_1_ECX] =
2166 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2167 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2168 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2169 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2170 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2171 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2172 .features[FEAT_8000_0001_EDX] =
2173 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2175 .features[FEAT_8000_0001_ECX] =
2176 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2177 .features[FEAT_7_0_EDX] =
2178 CPUID_7_0_EDX_SPEC_CTRL,
2179 .features[FEAT_7_0_EBX] =
2180 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2181 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2182 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2183 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2185 .features[FEAT_XSAVE] =
2186 CPUID_XSAVE_XSAVEOPT,
2187 .features[FEAT_6_EAX] =
2189 .xlevel = 0x80000008,
2190 .model_id = "Intel Core Processor (Broadwell, IBRS)",
2193 .name = "Skylake-Client",
2195 .vendor = CPUID_VENDOR_INTEL,
2199 .features[FEAT_1_EDX] =
2200 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2201 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2202 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2203 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2204 CPUID_DE | CPUID_FP87,
2205 .features[FEAT_1_ECX] =
2206 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2207 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2208 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2209 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2210 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2211 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2212 .features[FEAT_8000_0001_EDX] =
2213 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2215 .features[FEAT_8000_0001_ECX] =
2216 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2217 .features[FEAT_7_0_EBX] =
2218 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2219 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2220 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2221 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2222 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2223 /* Missing: XSAVES (not supported by some Linux versions,
2224 * including v4.1 to v4.12).
2225 * KVM doesn't yet expose any XSAVES state save component,
2226 * and the only one defined in Skylake (processor tracing)
2227 * probably will block migration anyway.
2229 .features[FEAT_XSAVE] =
2230 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2231 CPUID_XSAVE_XGETBV1,
2232 .features[FEAT_6_EAX] =
2234 .xlevel = 0x80000008,
2235 .model_id = "Intel Core Processor (Skylake)",
2238 .name = "Skylake-Client-IBRS",
2240 .vendor = CPUID_VENDOR_INTEL,
2244 .features[FEAT_1_EDX] =
2245 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2246 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2247 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2248 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2249 CPUID_DE | CPUID_FP87,
2250 .features[FEAT_1_ECX] =
2251 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2252 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2253 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2254 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2255 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2256 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2257 .features[FEAT_8000_0001_EDX] =
2258 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2260 .features[FEAT_8000_0001_ECX] =
2261 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2262 .features[FEAT_7_0_EDX] =
2263 CPUID_7_0_EDX_SPEC_CTRL,
2264 .features[FEAT_7_0_EBX] =
2265 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2266 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2267 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2268 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2269 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2270 /* Missing: XSAVES (not supported by some Linux versions,
2271 * including v4.1 to v4.12).
2272 * KVM doesn't yet expose any XSAVES state save component,
2273 * and the only one defined in Skylake (processor tracing)
2274 * probably will block migration anyway.
2276 .features[FEAT_XSAVE] =
2277 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2278 CPUID_XSAVE_XGETBV1,
2279 .features[FEAT_6_EAX] =
2281 .xlevel = 0x80000008,
2282 .model_id = "Intel Core Processor (Skylake, IBRS)",
2285 .name = "Skylake-Server",
2287 .vendor = CPUID_VENDOR_INTEL,
2291 .features[FEAT_1_EDX] =
2292 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2293 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2294 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2295 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2296 CPUID_DE | CPUID_FP87,
2297 .features[FEAT_1_ECX] =
2298 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2299 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2300 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2301 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2302 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2303 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2304 .features[FEAT_8000_0001_EDX] =
2305 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2306 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2307 .features[FEAT_8000_0001_ECX] =
2308 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2309 .features[FEAT_7_0_EBX] =
2310 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2311 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2312 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2313 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2314 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2315 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2316 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2317 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2318 /* Missing: XSAVES (not supported by some Linux versions,
2319 * including v4.1 to v4.12).
2320 * KVM doesn't yet expose any XSAVES state save component,
2321 * and the only one defined in Skylake (processor tracing)
2322 * probably will block migration anyway.
2324 .features[FEAT_XSAVE] =
2325 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2326 CPUID_XSAVE_XGETBV1,
2327 .features[FEAT_6_EAX] =
2329 .xlevel = 0x80000008,
2330 .model_id = "Intel Xeon Processor (Skylake)",
2333 .name = "Skylake-Server-IBRS",
2335 .vendor = CPUID_VENDOR_INTEL,
2339 .features[FEAT_1_EDX] =
2340 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2341 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2342 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2343 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2344 CPUID_DE | CPUID_FP87,
2345 .features[FEAT_1_ECX] =
2346 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2347 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2348 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2349 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2350 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2351 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2352 .features[FEAT_8000_0001_EDX] =
2353 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2354 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2355 .features[FEAT_8000_0001_ECX] =
2356 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2357 .features[FEAT_7_0_EDX] =
2358 CPUID_7_0_EDX_SPEC_CTRL,
2359 .features[FEAT_7_0_EBX] =
2360 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2361 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2362 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2363 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2364 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2365 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2366 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2367 CPUID_7_0_EBX_AVX512VL,
2368 /* Missing: XSAVES (not supported by some Linux versions,
2369 * including v4.1 to v4.12).
2370 * KVM doesn't yet expose any XSAVES state save component,
2371 * and the only one defined in Skylake (processor tracing)
2372 * probably will block migration anyway.
2374 .features[FEAT_XSAVE] =
2375 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2376 CPUID_XSAVE_XGETBV1,
2377 .features[FEAT_6_EAX] =
2379 .xlevel = 0x80000008,
2380 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2383 .name = "KnightsMill",
2385 .vendor = CPUID_VENDOR_INTEL,
2389 .features[FEAT_1_EDX] =
2390 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2391 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2392 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2393 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2394 CPUID_PSE | CPUID_DE | CPUID_FP87,
2395 .features[FEAT_1_ECX] =
2396 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2397 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2398 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2399 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2400 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2401 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2402 .features[FEAT_8000_0001_EDX] =
2403 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2404 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2405 .features[FEAT_8000_0001_ECX] =
2406 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2407 .features[FEAT_7_0_EBX] =
2408 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2409 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2410 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2411 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2412 CPUID_7_0_EBX_AVX512ER,
2413 .features[FEAT_7_0_ECX] =
2414 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2415 .features[FEAT_7_0_EDX] =
2416 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2417 .features[FEAT_XSAVE] =
2418 CPUID_XSAVE_XSAVEOPT,
2419 .features[FEAT_6_EAX] =
2421 .xlevel = 0x80000008,
2422 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2425 .name = "Opteron_G1",
2427 .vendor = CPUID_VENDOR_AMD,
2431 .features[FEAT_1_EDX] =
2432 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2433 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2434 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2435 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2436 CPUID_DE | CPUID_FP87,
2437 .features[FEAT_1_ECX] =
2439 .features[FEAT_8000_0001_EDX] =
2440 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2441 .xlevel = 0x80000008,
2442 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2445 .name = "Opteron_G2",
2447 .vendor = CPUID_VENDOR_AMD,
2451 .features[FEAT_1_EDX] =
2452 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2453 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2454 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2455 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2456 CPUID_DE | CPUID_FP87,
2457 .features[FEAT_1_ECX] =
2458 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2459 /* Missing: CPUID_EXT2_RDTSCP */
2460 .features[FEAT_8000_0001_EDX] =
2461 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2462 .features[FEAT_8000_0001_ECX] =
2463 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2464 .xlevel = 0x80000008,
2465 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2468 .name = "Opteron_G3",
2470 .vendor = CPUID_VENDOR_AMD,
2474 .features[FEAT_1_EDX] =
2475 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2476 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2477 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2478 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2479 CPUID_DE | CPUID_FP87,
2480 .features[FEAT_1_ECX] =
2481 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2483 /* Missing: CPUID_EXT2_RDTSCP */
2484 .features[FEAT_8000_0001_EDX] =
2485 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2486 .features[FEAT_8000_0001_ECX] =
2487 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2488 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2489 .xlevel = 0x80000008,
2490 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2493 .name = "Opteron_G4",
2495 .vendor = CPUID_VENDOR_AMD,
2499 .features[FEAT_1_EDX] =
2500 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2501 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2502 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2503 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2504 CPUID_DE | CPUID_FP87,
2505 .features[FEAT_1_ECX] =
2506 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2507 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2508 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2510 /* Missing: CPUID_EXT2_RDTSCP */
2511 .features[FEAT_8000_0001_EDX] =
2512 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2514 .features[FEAT_8000_0001_ECX] =
2515 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2516 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2517 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2520 .xlevel = 0x8000001A,
2521 .model_id = "AMD Opteron 62xx class CPU",
2524 .name = "Opteron_G5",
2526 .vendor = CPUID_VENDOR_AMD,
2530 .features[FEAT_1_EDX] =
2531 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2532 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2533 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2534 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2535 CPUID_DE | CPUID_FP87,
2536 .features[FEAT_1_ECX] =
2537 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2538 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2539 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2540 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2541 /* Missing: CPUID_EXT2_RDTSCP */
2542 .features[FEAT_8000_0001_EDX] =
2543 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2545 .features[FEAT_8000_0001_ECX] =
2546 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2547 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2548 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2551 .xlevel = 0x8000001A,
2552 .model_id = "AMD Opteron 63xx class CPU",
2557 .vendor = CPUID_VENDOR_AMD,
2561 .features[FEAT_1_EDX] =
2562 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2563 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2564 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2565 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2566 CPUID_VME | CPUID_FP87,
2567 .features[FEAT_1_ECX] =
2568 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2569 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2570 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2571 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2572 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2573 .features[FEAT_8000_0001_EDX] =
2574 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2575 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2577 .features[FEAT_8000_0001_ECX] =
2578 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2579 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2580 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2582 .features[FEAT_7_0_EBX] =
2583 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2584 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2585 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2586 CPUID_7_0_EBX_SHA_NI,
2587 /* Missing: XSAVES (not supported by some Linux versions,
2588 * including v4.1 to v4.12).
2589 * KVM doesn't yet expose any XSAVES state save component.
2591 .features[FEAT_XSAVE] =
2592 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2593 CPUID_XSAVE_XGETBV1,
2594 .features[FEAT_6_EAX] =
2596 .xlevel = 0x8000001E,
2597 .model_id = "AMD EPYC Processor",
2598 .cache_info = &epyc_cache_info,
2601 .name = "EPYC-IBPB",
2603 .vendor = CPUID_VENDOR_AMD,
2607 .features[FEAT_1_EDX] =
2608 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2609 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2610 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2611 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2612 CPUID_VME | CPUID_FP87,
2613 .features[FEAT_1_ECX] =
2614 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2615 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2616 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2617 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2618 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2619 .features[FEAT_8000_0001_EDX] =
2620 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2621 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2623 .features[FEAT_8000_0001_ECX] =
2624 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2625 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2626 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2628 .features[FEAT_8000_0008_EBX] =
2629 CPUID_8000_0008_EBX_IBPB,
2630 .features[FEAT_7_0_EBX] =
2631 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2632 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2633 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2634 CPUID_7_0_EBX_SHA_NI,
2635 /* Missing: XSAVES (not supported by some Linux versions,
2636 * including v4.1 to v4.12).
2637 * KVM doesn't yet expose any XSAVES state save component.
2639 .features[FEAT_XSAVE] =
2640 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2641 CPUID_XSAVE_XGETBV1,
2642 .features[FEAT_6_EAX] =
2644 .xlevel = 0x8000001E,
2645 .model_id = "AMD EPYC Processor (with IBPB)",
2646 .cache_info = &epyc_cache_info,
2650 typedef struct PropValue {
2651 const char *prop, *value;
2654 /* KVM-specific features that are automatically added/removed
2655 * from all CPU models when KVM is enabled.
2657 static PropValue kvm_default_props[] = {
2658 { "kvmclock", "on" },
2659 { "kvm-nopiodelay", "on" },
2660 { "kvm-asyncpf", "on" },
2661 { "kvm-steal-time", "on" },
2662 { "kvm-pv-eoi", "on" },
2663 { "kvmclock-stable-bit", "on" },
2666 { "monitor", "off" },
2671 /* TCG-specific defaults that override all CPU models when using TCG
2673 static PropValue tcg_default_props[] = {
2679 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2682 for (pv = kvm_default_props; pv->prop; pv++) {
2683 if (!strcmp(pv->prop, prop)) {
2689 /* It is valid to call this function only for properties that
2690 * are already present in the kvm_default_props table.
2695 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2696 bool migratable_only);
2698 static bool lmce_supported(void)
2700 uint64_t mce_cap = 0;
2703 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2708 return !!(mce_cap & MCG_LMCE_P);
2711 #define CPUID_MODEL_ID_SZ 48
2714 * cpu_x86_fill_model_id:
2715 * Get CPUID model ID string from host CPU.
2717 * @str should have at least CPUID_MODEL_ID_SZ bytes
2719 * The function does NOT add a null terminator to the string
2722 static int cpu_x86_fill_model_id(char *str)
2724 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2727 for (i = 0; i < 3; i++) {
2728 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2729 memcpy(str + i * 16 + 0, &eax, 4);
2730 memcpy(str + i * 16 + 4, &ebx, 4);
2731 memcpy(str + i * 16 + 8, &ecx, 4);
2732 memcpy(str + i * 16 + 12, &edx, 4);
2737 static Property max_x86_cpu_properties[] = {
2738 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2739 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2740 DEFINE_PROP_END_OF_LIST()
2743 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2745 DeviceClass *dc = DEVICE_CLASS(oc);
2746 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2750 xcc->model_description =
2751 "Enables all features supported by the accelerator in the current host";
2753 dc->props = max_x86_cpu_properties;
2756 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2758 static void max_x86_cpu_initfn(Object *obj)
2760 X86CPU *cpu = X86_CPU(obj);
2761 CPUX86State *env = &cpu->env;
2762 KVMState *s = kvm_state;
2764 /* We can't fill the features array here because we don't know yet if
2765 * "migratable" is true or false.
2767 cpu->max_features = true;
2769 if (accel_uses_host_cpuid()) {
2770 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2771 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2772 int family, model, stepping;
2773 X86CPUDefinition host_cpudef = { };
2774 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2776 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2777 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2779 host_vendor_fms(vendor, &family, &model, &stepping);
2781 cpu_x86_fill_model_id(model_id);
2783 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2784 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2785 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2786 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2788 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2791 if (kvm_enabled()) {
2792 env->cpuid_min_level =
2793 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2794 env->cpuid_min_xlevel =
2795 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2796 env->cpuid_min_xlevel2 =
2797 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2799 env->cpuid_min_level =
2800 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2801 env->cpuid_min_xlevel =
2802 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2803 env->cpuid_min_xlevel2 =
2804 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2807 if (lmce_supported()) {
2808 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2811 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2812 "vendor", &error_abort);
2813 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2814 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2815 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2816 object_property_set_str(OBJECT(cpu),
2817 "QEMU TCG CPU version " QEMU_HW_VERSION,
2818 "model-id", &error_abort);
2821 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
2824 static const TypeInfo max_x86_cpu_type_info = {
2825 .name = X86_CPU_TYPE_NAME("max"),
2826 .parent = TYPE_X86_CPU,
2827 .instance_init = max_x86_cpu_initfn,
2828 .class_init = max_x86_cpu_class_init,
2831 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2832 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2834 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2836 xcc->host_cpuid_required = true;
2839 if (kvm_enabled()) {
2840 xcc->model_description =
2841 "KVM processor with all supported host features ";
2842 } else if (hvf_enabled()) {
2843 xcc->model_description =
2844 "HVF processor with all supported host features ";
2848 static const TypeInfo host_x86_cpu_type_info = {
2849 .name = X86_CPU_TYPE_NAME("host"),
2850 .parent = X86_CPU_TYPE_NAME("max"),
2851 .class_init = host_x86_cpu_class_init,
2856 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2858 FeatureWordInfo *f = &feature_word_info[w];
2861 for (i = 0; i < 32; ++i) {
2862 if ((1UL << i) & mask) {
2863 const char *reg = get_register_name_32(f->cpuid_reg);
2865 warn_report("%s doesn't support requested feature: "
2866 "CPUID.%02XH:%s%s%s [bit %d]",
2867 accel_uses_host_cpuid() ? "host" : "TCG",
2869 f->feat_names[i] ? "." : "",
2870 f->feat_names[i] ? f->feat_names[i] : "", i);
2875 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2876 const char *name, void *opaque,
2879 X86CPU *cpu = X86_CPU(obj);
2880 CPUX86State *env = &cpu->env;
2883 value = (env->cpuid_version >> 8) & 0xf;
2885 value += (env->cpuid_version >> 20) & 0xff;
2887 visit_type_int(v, name, &value, errp);
2890 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2891 const char *name, void *opaque,
2894 X86CPU *cpu = X86_CPU(obj);
2895 CPUX86State *env = &cpu->env;
2896 const int64_t min = 0;
2897 const int64_t max = 0xff + 0xf;
2898 Error *local_err = NULL;
2901 visit_type_int(v, name, &value, &local_err);
2903 error_propagate(errp, local_err);
2906 if (value < min || value > max) {
2907 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2908 name ? name : "null", value, min, max);
2912 env->cpuid_version &= ~0xff00f00;
2914 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2916 env->cpuid_version |= value << 8;
2920 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2921 const char *name, void *opaque,
2924 X86CPU *cpu = X86_CPU(obj);
2925 CPUX86State *env = &cpu->env;
2928 value = (env->cpuid_version >> 4) & 0xf;
2929 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2930 visit_type_int(v, name, &value, errp);
2933 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2934 const char *name, void *opaque,
2937 X86CPU *cpu = X86_CPU(obj);
2938 CPUX86State *env = &cpu->env;
2939 const int64_t min = 0;
2940 const int64_t max = 0xff;
2941 Error *local_err = NULL;
2944 visit_type_int(v, name, &value, &local_err);
2946 error_propagate(errp, local_err);
2949 if (value < min || value > max) {
2950 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2951 name ? name : "null", value, min, max);
2955 env->cpuid_version &= ~0xf00f0;
2956 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
2959 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2960 const char *name, void *opaque,
2963 X86CPU *cpu = X86_CPU(obj);
2964 CPUX86State *env = &cpu->env;
2967 value = env->cpuid_version & 0xf;
2968 visit_type_int(v, name, &value, errp);
2971 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2972 const char *name, void *opaque,
2975 X86CPU *cpu = X86_CPU(obj);
2976 CPUX86State *env = &cpu->env;
2977 const int64_t min = 0;
2978 const int64_t max = 0xf;
2979 Error *local_err = NULL;
2982 visit_type_int(v, name, &value, &local_err);
2984 error_propagate(errp, local_err);
2987 if (value < min || value > max) {
2988 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2989 name ? name : "null", value, min, max);
2993 env->cpuid_version &= ~0xf;
2994 env->cpuid_version |= value & 0xf;
2997 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2999 X86CPU *cpu = X86_CPU(obj);
3000 CPUX86State *env = &cpu->env;
3003 value = g_malloc(CPUID_VENDOR_SZ + 1);
3004 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
3005 env->cpuid_vendor3);
3009 static void x86_cpuid_set_vendor(Object *obj, const char *value,
3012 X86CPU *cpu = X86_CPU(obj);
3013 CPUX86State *env = &cpu->env;
3016 if (strlen(value) != CPUID_VENDOR_SZ) {
3017 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
3021 env->cpuid_vendor1 = 0;
3022 env->cpuid_vendor2 = 0;
3023 env->cpuid_vendor3 = 0;
3024 for (i = 0; i < 4; i++) {
3025 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3026 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3027 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3031 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3033 X86CPU *cpu = X86_CPU(obj);
3034 CPUX86State *env = &cpu->env;
3038 value = g_malloc(48 + 1);
3039 for (i = 0; i < 48; i++) {
3040 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3046 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3049 X86CPU *cpu = X86_CPU(obj);
3050 CPUX86State *env = &cpu->env;
3053 if (model_id == NULL) {
3056 len = strlen(model_id);
3057 memset(env->cpuid_model, 0, 48);
3058 for (i = 0; i < 48; i++) {
3062 c = (uint8_t)model_id[i];
3064 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3068 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3069 void *opaque, Error **errp)
3071 X86CPU *cpu = X86_CPU(obj);
3074 value = cpu->env.tsc_khz * 1000;
3075 visit_type_int(v, name, &value, errp);
3078 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3079 void *opaque, Error **errp)
3081 X86CPU *cpu = X86_CPU(obj);
3082 const int64_t min = 0;
3083 const int64_t max = INT64_MAX;
3084 Error *local_err = NULL;
3087 visit_type_int(v, name, &value, &local_err);
3089 error_propagate(errp, local_err);
3092 if (value < min || value > max) {
3093 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3094 name ? name : "null", value, min, max);
3098 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
3101 /* Generic getter for "feature-words" and "filtered-features" properties */
3102 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
3103 const char *name, void *opaque,
3106 uint32_t *array = (uint32_t *)opaque;
3108 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
3109 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
3110 X86CPUFeatureWordInfoList *list = NULL;
3112 for (w = 0; w < FEATURE_WORDS; w++) {
3113 FeatureWordInfo *wi = &feature_word_info[w];
3114 X86CPUFeatureWordInfo *qwi = &word_infos[w];
3115 qwi->cpuid_input_eax = wi->cpuid_eax;
3116 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
3117 qwi->cpuid_input_ecx = wi->cpuid_ecx;
3118 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
3119 qwi->features = array[w];
3121 /* List will be in reverse order, but order shouldn't matter */
3122 list_entries[w].next = list;
3123 list_entries[w].value = &word_infos[w];
3124 list = &list_entries[w];
3127 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
3130 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3131 void *opaque, Error **errp)
3133 X86CPU *cpu = X86_CPU(obj);
3134 int64_t value = cpu->hyperv_spinlock_attempts;
3136 visit_type_int(v, name, &value, errp);
3139 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3140 void *opaque, Error **errp)
3142 const int64_t min = 0xFFF;
3143 const int64_t max = UINT_MAX;
3144 X86CPU *cpu = X86_CPU(obj);
3148 visit_type_int(v, name, &value, &err);
3150 error_propagate(errp, err);
3154 if (value < min || value > max) {
3155 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
3156 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
3157 object_get_typename(obj), name ? name : "null",
3161 cpu->hyperv_spinlock_attempts = value;
3164 static const PropertyInfo qdev_prop_spinlocks = {
3166 .get = x86_get_hv_spinlocks,
3167 .set = x86_set_hv_spinlocks,
3170 /* Convert all '_' in a feature string option name to '-', to make feature
3171 * name conform to QOM property naming rule, which uses '-' instead of '_'.
3173 static inline void feat2prop(char *s)
3175 while ((s = strchr(s, '_'))) {
3180 /* Return the feature property name for a feature flag bit */
3181 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
3183 /* XSAVE components are automatically enabled by other features,
3184 * so return the original feature name instead
3186 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
3187 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
3189 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
3190 x86_ext_save_areas[comp].bits) {
3191 w = x86_ext_save_areas[comp].feature;
3192 bitnr = ctz32(x86_ext_save_areas[comp].bits);
3197 assert(w < FEATURE_WORDS);
3198 return feature_word_info[w].feat_names[bitnr];
3201 /* Compatibility hack to maintain legacy +-feat semantic,
3202 * where +-feat overwrites any feature set by
3203  * feat=on|feat even if the latter is parsed after +-feat
3204 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
3206 static GList *plus_features, *minus_features;
3208 static gint compare_string(gconstpointer a, gconstpointer b)
3210 return g_strcmp0(a, b);
3213 /* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * Registers each "feat=val" pair as a global property on @typename, and
 * records legacy "+feat"/"-feat" entries in plus_features/minus_features.
 * Runs only once per process (guarded by cpu_globals_initialized).
 */
3215 static void x86_cpu_parse_featurestr(const char *typename, char *features,
3218     char *featurestr; /* Single 'key=value" string being parsed */
3219     static bool cpu_globals_initialized;
3220     bool ambiguous = false;
3222     if (cpu_globals_initialized) {
3225     cpu_globals_initialized = true;
     /* strtok() mutates @features and keeps static state; callers pass a
      * writable, single-threaded-use string. */
3231     for (featurestr = strtok(features, ",");
3233          featurestr = strtok(NULL, ",")) {
3235         const char *val = NULL;
3238         GlobalProperty *prop;
3240         /* Compatibility syntax: */
3241         if (featurestr[0] == '+') {
3242             plus_features = g_list_append(plus_features,
3243                                           g_strdup(featurestr + 1));
3245         } else if (featurestr[0] == '-') {
3246             minus_features = g_list_append(minus_features,
3247                                            g_strdup(featurestr + 1));
3251         eq = strchr(featurestr, '=');
3259         feat2prop(featurestr);
         /* Warn if the same flag was also given in legacy +/- form. */
3262         if (g_list_find_custom(plus_features, name, compare_string)) {
3263             warn_report("Ambiguous CPU model string. "
3264                         "Don't mix both \"+%s\" and \"%s=%s\"",
3268         if (g_list_find_custom(minus_features, name, compare_string)) {
3269             warn_report("Ambiguous CPU model string. "
3270                         "Don't mix both \"-%s\" and \"%s=%s\"",
         /* "tsc-freq" accepts metric suffixes; convert to a plain integer
          * and rewrite it as the canonical "tsc-frequency" property. */
3276         if (!strcmp(name, "tsc-freq")) {
3280             ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
3281             if (ret < 0 || tsc_freq > INT64_MAX) {
3282                 error_setg(errp, "bad numerical value %s", val);
3285             snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
3287             name = "tsc-frequency";
         /* Ownership of @prop transfers to the global-property machinery;
          * prop->driver aliases @typename (must outlive the property). */
3290         prop = g_new0(typeof(*prop), 1);
3291         prop->driver = typename;
3292         prop->property = g_strdup(name);
3293         prop->value = g_strdup(val);
3294         prop->errp = &error_fatal;
3295         qdev_prop_register_global(prop);
3299         warn_report("Compatibility of ambiguous CPU model "
3300                     "strings won't be kept on future QEMU versions");
/* Forward declarations: both are defined later in this file. */
3304 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3305 static int x86_cpu_filter_features(X86CPU *cpu);
3307 /* Check for missing features that may prevent the CPU class from
3308 * running using the current machine and accelerator.
 *
 * Appends the property names of unavailable features to *@missing_feats
 * (a strList the caller owns). Instantiates a throwaway CPU object to run
 * expansion/filtering, then unrefs it.
 */
3310 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3311                                                  strList **missing_feats)
3316     strList **next = missing_feats;
     /* Host-cpuid-only models ("host"/"max") need KVM/HVF. */
3318     if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3319         strList *new = g_new0(strList, 1);
3320         new->value = g_strdup("kvm");
3321         *missing_feats = new;
3325     xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3327     x86_cpu_expand_features(xc, &err);
3329         /* Errors at x86_cpu_expand_features should never happen,
3330          * but in case it does, just report the model as not
3331          * runnable at all using the "type" property.
3333         strList *new = g_new0(strList, 1);
3334         new->value = g_strdup("type");
3339     x86_cpu_filter_features(xc);
     /* Every bit left in filtered_features was removed by the accelerator. */
3341     for (w = 0; w < FEATURE_WORDS; w++) {
3342         uint32_t filtered = xc->filtered_features[w];
3344         for (i = 0; i < 32; i++) {
3345             if (filtered & (1UL << i)) {
3346                 strList *new = g_new0(strList, 1);
3347                 new->value = g_strdup(x86_cpu_feature_name(w, i));
3354     object_unref(OBJECT(xc));
3357 /* Print all cpuid feature names in featureset
 * Word-wraps the list at ~75 columns using the supplied fprintf-like
 * callback; @features is a GList of const char * names.
 */
3359 static void listflags(FILE *f, fprintf_function print, GList *features)
3364     for (tmp = features; tmp; tmp = tmp->next) {
3365         const char *name = tmp->data;
         /* Line-length check; the reset of `len` falls in elided lines. */
3366         if ((len + strlen(name) + 1) >= 75) {
3370         print(f, "%s%s", len == 0 ? " " : " ", name);
3371         len += strlen(name) + 1;
3376 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3377 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3379     ObjectClass *class_a = (ObjectClass *)a;
3380     ObjectClass *class_b = (ObjectClass *)b;
3381     X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3382     X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3383     char *name_a, *name_b;
     /* Primary key: the ordering field; ties broken by model name. */
3386     if (cc_a->ordering != cc_b->ordering) {
3387         ret = cc_a->ordering - cc_b->ordering;
3389         name_a = x86_cpu_class_get_model_name(cc_a);
3390         name_b = x86_cpu_class_get_model_name(cc_b);
3391         ret = strcmp(name_a, name_b);
/* Return all TYPE_X86_CPU classes, sorted per x86_cpu_list_compare().
 * Caller owns the returned GSList (free with g_slist_free). */
3398 static GSList *get_sorted_cpu_model_list(void)
3400     GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3401     list = g_slist_sort(list, x86_cpu_list_compare);
/* g_slist_foreach() callback: print one CPU model line for x86_cpu_list(). */
3405 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3407     ObjectClass *oc = data;
3408     X86CPUClass *cc = X86_CPU_CLASS(oc);
3409     CPUListState *s = user_data;
3410     char *name = x86_cpu_class_get_model_name(cc);
3411     const char *desc = cc->model_description;
     /* Fall back to the model_id from the CPU definition table. */
3412     if (!desc && cc->cpu_def) {
3413         desc = cc->cpu_def->model_id;
3416     (*s->cpu_fprintf)(s->file, "x86 %-20s  %-48s\n",
3421 /* list available CPU models and flags */
3422 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3427         .cpu_fprintf = cpu_fprintf,
3430     GList *names = NULL;
3432     (*cpu_fprintf)(f, "Available CPUs:\n");
3433     list = get_sorted_cpu_model_list();
3434     g_slist_foreach(list, x86_cpu_list_entry, &s);
     /* Collect every named feature bit across all feature words. */
3438     for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3439         FeatureWordInfo *fw = &feature_word_info[i];
3440         for (j = 0; j < 32; j++) {
3441             if (fw->feat_names[j]) {
3442                 names = g_list_append(names, (gpointer)fw->feat_names[j]);
     /* Casting strcmp to GCompareFunc is the usual glib idiom here. */
3447     names = g_list_sort(names, (GCompareFunc)strcmp);
3449     (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
3450     listflags(f, cpu_fprintf, names);
3451     (*cpu_fprintf)(f, "\n");
/* g_slist_foreach() callback: prepend one CpuDefinitionInfo entry for
 * arch_query_cpu_definitions(). @user_data is the list head to push onto. */
3455 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3457     ObjectClass *oc = data;
3458     X86CPUClass *cc = X86_CPU_CLASS(oc);
3459     CpuDefinitionInfoList **cpu_list = user_data;
3460     CpuDefinitionInfoList *entry;
3461     CpuDefinitionInfo *info;
3463     info = g_malloc0(sizeof(*info));
3464     info->name = x86_cpu_class_get_model_name(cc);
3465     x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3466     info->has_unavailable_features = true;
3467     info->q_typename = g_strdup(object_class_get_name(oc));
3468     info->migration_safe = cc->migration_safe;
3469     info->has_migration_safe = true;
3470     info->q_static = cc->static_model;
     /* Prepend to the caller's list (ownership transfers to the list). */
3472     entry = g_malloc0(sizeof(*entry));
3473     entry->value = info;
3474     entry->next = *cpu_list;
/* QMP query-cpu-definitions backend: list every x86 CPU model. */
3478 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
3480     CpuDefinitionInfoList *cpu_list = NULL;
3481     GSList *list = get_sorted_cpu_model_list();
3482     g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
/* Return the feature bits of word @w supported by the current accelerator
 * (KVM, HVF, or TCG); optionally restricted to migratable bits. */
3487 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3488                                                    bool migratable_only)
3490     FeatureWordInfo *wi = &feature_word_info[w];
3493     if (kvm_enabled()) {
3494         r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
3497     } else if (hvf_enabled()) {
3498         r = hvf_get_supported_cpuid(wi->cpuid_eax,
3501     } else if (tcg_enabled()) {
3502         r = wi->tcg_features;
3506     if (migratable_only) {
3507         r &= x86_cpu_get_migratable_flags(w);
/* Warn about every feature bit that was filtered out of this CPU. */
3512 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3516     for (w = 0; w < FEATURE_WORDS; w++) {
3517         report_unavailable_features(w, cpu->filtered_features[w]);
/* Apply a NULL-terminated PropValue array to @cpu via the QOM property
 * parser. */
3521 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3524     for (pv = props; pv->prop; pv++) {
3528         object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3533 /* Load data from X86CPUDefinition into a X86CPU object
 *
 * Called from instance_init; errors are reported through @errp.
 */
3535 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
3537     CPUX86State *env = &cpu->env;
3539     char host_vendor[CPUID_VENDOR_SZ + 1];
3542     /*NOTE: any property set by this function should be returned by
3543      * x86_cpu_static_props(), so static expansion of
3544      * query-cpu-model-expansion is always complete.
3547     /* CPU models only set _minimum_ values for level/xlevel: */
3548     object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
3549     object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
3551     object_property_set_int(OBJECT(cpu), def->family, "family", errp);
3552     object_property_set_int(OBJECT(cpu), def->model, "model", errp);
3553     object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
3554     object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
3555     for (w = 0; w < FEATURE_WORDS; w++) {
3556         env->features[w] = def->features[w];
3559     /* legacy-cache defaults to 'off' if CPU model provides cache info */
3560     cpu->legacy_cache = !def->cache_info;
3562     /* Special cases not set in the X86CPUDefinition structs: */
3563     /* TODO: in-kernel irqchip for hvf */
3564     if (kvm_enabled()) {
3565         if (!kvm_irqchip_in_kernel()) {
3566             x86_cpu_change_kvm_default("x2apic", "off");
3569         x86_cpu_apply_props(cpu, kvm_default_props);
3570     } else if (tcg_enabled()) {
3571         x86_cpu_apply_props(cpu, tcg_default_props);
     /* Always advertise that we run under a hypervisor. */
3574     env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
3576     /* sysenter isn't supported in compatibility mode on AMD,
3577      * syscall isn't supported in compatibility mode on Intel.
3578      * Normally we advertise the actual CPU vendor, but you can
3579      * override this using the 'vendor' property if you want to use
3580      * KVM's sysenter/syscall emulation in compatibility mode and
3581      * when doing cross vendor migration
3583     vendor = def->vendor;
3584     if (accel_uses_host_cpuid()) {
3585         uint32_t ebx = 0, ecx = 0, edx = 0;
3586         host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
3587         x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
3588         vendor = host_vendor;
3591     object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
3595 /* Return a QDict containing keys for all properties that can be included
3596 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3597 * must be included in the dictionary.
 *
 * Values are all qnull; only the key set matters to callers.
 */
3599 static QDict *x86_cpu_static_props(void)
3603     static const char *props[] = {
3621     for (i = 0; props[i]; i++) {
3622         qdict_put_null(d, props[i]);
     /* Every named feature flag is also a static property. */
3625     for (w = 0; w < FEATURE_WORDS; w++) {
3626         FeatureWordInfo *fi = &feature_word_info[w];
3628         for (bit = 0; bit < 32; bit++) {
3629             if (!fi->feat_names[bit]) {
3632             qdict_put_null(d, fi->feat_names[bit]);
3639 /* Add an entry to @props dict, with the value for property. */
3640 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3642     QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
     /* qdict_put_obj() takes ownership of @value. */
3645     qdict_put_obj(props, prop, value);
3648 /* Convert CPU model data from X86CPU object to a property dictionary
3649 * that can recreate exactly the same CPU model.
 * Uses only the "static" property set from x86_cpu_static_props().
 */
3651 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3653     QDict *sprops = x86_cpu_static_props();
3654     const QDictEntry *e;
3656     for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3657         const char *prop = qdict_entry_key(e);
3658         x86_cpu_expand_prop(cpu, props, prop);
3662 /* Convert CPU model data from X86CPU object to a property dictionary
3663 * that can recreate exactly the same CPU model, including every
3664 * writeable QOM property.
3666 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3668     ObjectPropertyIterator iter;
3669     ObjectProperty *prop;
3671     object_property_iter_init(&iter, OBJECT(cpu));
3672     while ((prop = object_property_iter_next(&iter))) {
3673         /* skip read-only or write-only properties */
3674         if (!prop->get || !prop->set) {
3678         /* "hotplugged" is the only property that is configurable
3679          * on the command-line but will be set differently on CPUs
3680          * created using "-cpu ... -smp ..." and by CPUs created
3681          * on the fly by x86_cpu_from_model() for querying. Skip it.
3683         if (!strcmp(prop->name, "hotplugged")) {
3686         x86_cpu_expand_prop(cpu, props, prop->name);
/* Set every key/value pair of @props as a QOM property on @obj.
 * Stops reporting further pairs once @err is set (propagated to @errp). */
3690 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3692     const QDictEntry *prop;
3695     for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3696         object_property_set_qobject(obj, qdict_entry_value(prop),
3697                                     qdict_entry_key(prop), &err);
3703     error_propagate(errp, err);
3706 /* Create X86CPU object according to model+props specification */
/* Returns a new reference (caller unrefs), or NULL with @errp set.
 * The returned CPU has had its features expanded but NOT filtered. */
3707 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3713     xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3715         error_setg(&err, "CPU model '%s' not found", model);
3719     xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3721         object_apply_props(OBJECT(xc), props, &err);
3727     x86_cpu_expand_features(xc, &err);
     /* Error path: drop the partially-built object. */
3734     error_propagate(errp, err);
3735     object_unref(OBJECT(xc));
/* QMP query-cpu-model-expansion backend.
 * Static expansion re-expresses the model on top of "base"; full expansion
 * keeps the original name and adds every writable property. */
3741 CpuModelExpansionInfo *
3742 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3743                                CpuModelInfo *model,
3748     CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3749     QDict *props = NULL;
3750     const char *base_name;
3752     xc = x86_cpu_from_model(model->name,
3754                             qobject_to(QDict, model->props) :
3760     props = qdict_new();
3763     case CPU_MODEL_EXPANSION_TYPE_STATIC:
3764         /* Static expansion will be based on "base" only */
3766         x86_cpu_to_dict(xc, props);
3768     case CPU_MODEL_EXPANSION_TYPE_FULL:
3769         /* As we don't return every single property, full expansion needs
3770          * to keep the original model name+props, and add extra
3771          * properties on top of that.
3773         base_name = model->name;
3774         x86_cpu_to_dict_full(xc, props);
        /* NOTE(review): "Unsupportted" below is a typo for "Unsupported" in a
         * user-visible error message; fix when touching this code. */
3777         error_setg(&err, "Unsupportted expansion type");
3782         props = qdict_new();
3784         x86_cpu_to_dict(xc, props);
3786     ret->model = g_new0(CpuModelInfo, 1);
3787     ret->model->name = g_strdup(base_name);
3788     ret->model->props = QOBJECT(props);
3789     ret->model->has_props = true;
3792     object_unref(OBJECT(xc));
     /* Error path: free the partially-built result. */
3794     error_propagate(errp, err);
3795     qapi_free_CpuModelExpansionInfo(ret);
/* Architecture name reported to gdb; caller frees the returned string. */
3801 static gchar *x86_gdb_arch_name(CPUState *cs)
3803 #ifdef TARGET_X86_64
3804     return g_strdup("i386:x86-64");
3806     return g_strdup("i386");
/* class_init for model-specific CPU subclasses: attach the static
 * X86CPUDefinition passed as @data and mark the model migration-safe. */
3810 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3812     X86CPUDefinition *cpudef = data;
3813     X86CPUClass *xcc = X86_CPU_CLASS(oc);
3815     xcc->cpu_def = cpudef;
3816     xcc->migration_safe = true;
/* Register one QOM type per entry of the builtin CPU definition table. */
3819 static void x86_register_cpudef_type(X86CPUDefinition *def)
3821     char *typename = x86_cpu_type_name(def->name);
3824         .parent = TYPE_X86_CPU,
3825         .class_init = x86_cpu_cpudef_class_init,
3829     /* AMD aliases are handled at runtime based on CPUID vendor, so
3830      * they shouldn't be set on the CPU model table.
3832     assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3833     /* catch mistakes instead of silently truncating model_id when too long */
3834     assert(def->model_id && strlen(def->model_id) <= 48);
3841 #if !defined(CONFIG_USER_ONLY)
/* Strip the APIC CPUID bit; used when the machine has no APIC. */
3843 void cpu_clear_apic_feature(CPUX86State *env)
3845     env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3848 #endif /* !CONFIG_USER_ONLY */
/* Emulate the CPUID instruction for @index/@count (EAX/ECX inputs),
 * writing the four result registers through the out-pointers.
 *
 * NOTE(review): this excerpt elides most `case` labels of the big switch on
 * @index; the per-leaf comments below follow the visible register writes.
 */
3850 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3851                    uint32_t *eax, uint32_t *ebx,
3852                    uint32_t *ecx, uint32_t *edx)
3854     X86CPU *cpu = x86_env_get_cpu(env);
3855     CPUState *cs = CPU(cpu);
3856     uint32_t pkg_offset;
3858     uint32_t signature[3];
3860     /* Calculate & apply limits for different index ranges */
3861     if (index >= 0xC0000000) {
3862         limit = env->cpuid_xlevel2;
3863     } else if (index >= 0x80000000) {
3864         limit = env->cpuid_xlevel;
3865     } else if (index >= 0x40000000) {
3868         limit = env->cpuid_level;
3871     if (index > limit) {
3872         /* Intel documentation states that invalid EAX input will
3873          * return the same information as EAX=cpuid_level
3874          * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3876         index = env->cpuid_level;
     /* Leaf 0: max basic leaf + vendor string (EBX/EDX/ECX order). */
3881         *eax = env->cpuid_level;
3882         *ebx = env->cpuid_vendor1;
3883         *edx = env->cpuid_vendor2;
3884         *ecx = env->cpuid_vendor3;
     /* Leaf 1: version/feature information. */
3887         *eax = env->cpuid_version;
3888         *ebx = (cpu->apic_id << 24) |
3889                8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3890         *ecx = env->features[FEAT_1_ECX];
3891         if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3892             *ecx |= CPUID_EXT_OSXSAVE;
3894         *edx = env->features[FEAT_1_EDX];
3895         if (cs->nr_cores * cs->nr_threads > 1) {
3896             *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3901         /* cache info: needed for Pentium Pro compatibility */
3902         if (cpu->cache_info_passthrough) {
3903             host_cpuid(index, 0, eax, ebx, ecx, edx);
3906         *eax = 1; /* Number of CPUID[EAX=2] calls required */
3908         if (!cpu->enable_l3_cache) {
3911             *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
3913         *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
3914                (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) <<  8) |
3915                (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
3918         /* cache info: needed for Core compatibility */
3919         if (cpu->cache_info_passthrough) {
3920             host_cpuid(index, count, eax, ebx, ecx, edx);
3921             /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3922             *eax &= ~0xFC000000;
3923             if ((*eax & 31) && cs->nr_cores > 1) {
3924                 *eax |= (cs->nr_cores - 1) << 26;
3929             case 0: /* L1 dcache info */
3930                 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
3932                                     eax, ebx, ecx, edx);
3934             case 1: /* L1 icache info */
3935                 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
3937                                     eax, ebx, ecx, edx);
3939             case 2: /* L2 cache info */
3940                 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
3941                                     cs->nr_threads, cs->nr_cores,
3942                                     eax, ebx, ecx, edx);
3944             case 3: /* L3 cache info */
3945                 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3946                 if (cpu->enable_l3_cache) {
3947                     encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
3948                                         (1 << pkg_offset), cs->nr_cores,
3949                                         eax, ebx, ecx, edx);
3953             default: /* end of info */
3954                 *eax = *ebx = *ecx = *edx = 0;
3960         /* MONITOR/MWAIT Leaf */
3961         *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
3962         *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
3963         *ecx = cpu->mwait.ecx; /* flags */
3964         *edx = cpu->mwait.edx; /* mwait substates */
3967         /* Thermal and Power Leaf */
3968         *eax = env->features[FEAT_6_EAX];
3974         /* Structured Extended Feature Flags Enumeration Leaf */
3976             *eax = 0; /* Maximum ECX value for sub-leaves */
3977             *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3978             *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
3979             if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3980                 *ecx |= CPUID_7_0_ECX_OSPKE;
3982             *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3991         /* Direct Cache Access Information Leaf */
3992         *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3998         /* Architectural Performance Monitoring Leaf */
3999         if (kvm_enabled() && cpu->enable_pmu) {
4000             KVMState *s = cs->kvm_state;
4002             *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
4003             *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
4004             *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
4005             *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
4006         } else if (hvf_enabled() && cpu->enable_pmu) {
4007             *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
4008             *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
4009             *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
4010             *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
4019         /* Extended Topology Enumeration Leaf */
4020         if (!cpu->enable_cpuid_0xb) {
4021             *eax = *ebx = *ecx = *edx = 0;
4025         *ecx = count & 0xff;
4026         *edx = cpu->apic_id;
         /* Sub-leaf 0: SMT level; sub-leaf 1: core level. */
4030             *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
4031             *ebx = cs->nr_threads;
4032             *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
4035             *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
4036             *ebx = cs->nr_cores * cs->nr_threads;
4037             *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
4042             *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
4045         assert(!(*eax & ~0x1f));
4046         *ebx &= 0xffff; /* The count doesn't need to be reliable. */
4049         /* Processor Extended State */
4054         if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4059             *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
4060             *eax = env->features[FEAT_XSAVE_COMP_LO];
4061             *edx = env->features[FEAT_XSAVE_COMP_HI];
4063         } else if (count == 1) {
4064             *eax = env->features[FEAT_XSAVE];
4065         } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
4066             if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
4067                 const ExtSaveArea *esa = &x86_ext_save_areas[count];
4075         /* Intel Processor Trace Enumeration */
4080         if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
4086             *eax = INTEL_PT_MAX_SUBLEAF;
4087             *ebx = INTEL_PT_MINIMAL_EBX;
4088             *ecx = INTEL_PT_MINIMAL_ECX;
4089         } else if (count == 1) {
4090             *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
4091             *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
4097          * CPUID code in kvm_arch_init_vcpu() ignores stuff
4098          * set here, but we restrict to TCG none the less.
4100         if (tcg_enabled() && cpu->expose_tcg) {
4101             memcpy(signature, "TCGTCGTCGTCG", 12);
4103             *ebx = signature[0];
4104             *ecx = signature[1];
4105             *edx = signature[2];
     /* Leaf 0x80000000: max extended leaf + vendor string. */
4120         *eax = env->cpuid_xlevel;
4121         *ebx = env->cpuid_vendor1;
4122         *edx = env->cpuid_vendor2;
4123         *ecx = env->cpuid_vendor3;
4126         *eax = env->cpuid_version;
4128         *ecx = env->features[FEAT_8000_0001_ECX];
4129         *edx = env->features[FEAT_8000_0001_EDX];
4131         /* The Linux kernel checks for the CMPLegacy bit and
4132          * discards multiple thread information if it is set.
4133          * So don't set it here for Intel to make Linux guests happy.
4135         if (cs->nr_cores * cs->nr_threads > 1) {
4136             if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
4137                 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
4138                 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
4139                 *ecx |= 1 << 1;    /* CmpLegacy bit */
     /* Leaves 0x80000002..4: processor brand string, 16 bytes per leaf. */
4146         *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
4147         *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
4148         *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
4149         *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
4152         /* cache info (L1 cache) */
4153         if (cpu->cache_info_passthrough) {
4154             host_cpuid(index, 0, eax, ebx, ecx, edx);
4157         *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
4158                (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
4159         *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
4160                (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
4161         *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
4162         *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
4165         /* cache info (L2 cache) */
4166         if (cpu->cache_info_passthrough) {
4167             host_cpuid(index, 0, eax, ebx, ecx, edx);
4170         *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
4171                (L2_DTLB_2M_ENTRIES << 16) | \
4172                (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
4173                (L2_ITLB_2M_ENTRIES);
4174         *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
4175                (L2_DTLB_4K_ENTRIES << 16) | \
4176                (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
4177                (L2_ITLB_4K_ENTRIES);
4178         encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
4179                                    cpu->enable_l3_cache ?
4180                                    env->cache_info_amd.l3_cache : NULL,
4187         *edx = env->features[FEAT_8000_0007_EDX];
4190         /* virtual & phys address size in low 2 bytes. */
4191         if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4192             /* 64 bit processor */
4193             *eax = cpu->phys_bits; /* configurable physical bits */
4194             if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
4195                 *eax |= 0x00003900; /* 57 bits virtual */
4197                 *eax |= 0x00003000; /* 48 bits virtual */
4200             *eax = cpu->phys_bits;
4202         *ebx = env->features[FEAT_8000_0008_EBX];
4205         if (cs->nr_cores * cs->nr_threads > 1) {
4206             *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
     /* Leaf 0x8000000A: SVM information (AMD). */
4210         if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4211             *eax = 0x00000001; /* SVM Revision */
4212             *ebx = 0x00000010; /* nr of ASIDs */
4214             *edx = env->features[FEAT_SVM]; /* optional features */
4225         case 0: /* L1 dcache info */
4226             encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
4227                                        eax, ebx, ecx, edx);
4229         case 1: /* L1 icache info */
4230             encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
4231                                        eax, ebx, ecx, edx);
4233         case 2: /* L2 cache info */
4234             encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
4235                                        eax, ebx, ecx, edx);
4237         case 3: /* L3 cache info */
4238             encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
4239                                        eax, ebx, ecx, edx);
4241         default: /* end of info */
4242             *eax = *ebx = *ecx = *edx = 0;
     /* Leaf 0x8000001E: extended APIC/topology IDs. */
4247         assert(cpu->core_id <= 255);
4248         encode_topo_cpuid8000001e(cs, cpu,
4249                                   eax, ebx, ecx, edx);
4252         *eax = env->cpuid_xlevel2;
4258         /* Support for VIA CPU's CPUID instruction */
4259         *eax = env->cpuid_version;
4262         *edx = env->features[FEAT_C000_0001_EDX];
4267         /* Reserved for the future, and now filled with zero */
     /* SEV leaf: enabled flag, C-bit position, reduced phys bits. */
4274         *eax = sev_enabled() ? 0x2 : 0;
4275         *ebx = sev_get_cbit_position();
4276         *ebx |= sev_get_reduced_phys_bits() << 6;
4281         /* reserved values: zero */
4290 /* CPUClass::reset() */
/* Reset the CPU to its architectural power-on state (segments, control
 * registers, FPU/SSE, MSRs, debug registers, MTRRs, pending events). */
4291 static void x86_cpu_reset(CPUState *s)
4293     X86CPU *cpu = X86_CPU(s);
4294     X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
4295     CPUX86State *env = &cpu->env;
4300     xcc->parent_reset(s);
     /* Zero everything up to the persistent tail of CPUX86State. */
4302     memset(env, 0, offsetof(CPUX86State, end_reset_fields));
4304     env->old_exception = -1;
4306     /* init to reset state */
4308     env->hflags2 |= HF2_GIF_MASK;
4310     cpu_x86_update_cr0(env, 0x60000010);
4311     env->a20_mask = ~0x0;
4312     env->smbase = 0x30000;
4313     env->msr_smi_count = 0;
4315     env->idt.limit = 0xffff;
4316     env->gdt.limit = 0xffff;
4317     env->ldt.limit = 0xffff;
4318     env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
4319     env->tr.limit = 0xffff;
4320     env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
     /* Real-mode reset values: CS base 0xffff0000 so the first fetch hits
      * the reset vector; data segments flat at 0. */
4322     cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
4323                            DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
4324                            DESC_R_MASK | DESC_A_MASK);
4325     cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
4326                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4328     cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
4329                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4331     cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
4332                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4334     cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
4335                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4337     cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
4338                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4342     env->regs[R_EDX] = env->cpuid_version;
4347     for (i = 0; i < 8; i++) {
4350     cpu_set_fpuc(env, 0x37f);
4352     env->mxcsr = 0x1f80;
4353     /* All units are in INIT state.  */
4356     env->pat = 0x0007040600070406ULL;
4357     env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
4359     memset(env->dr, 0, sizeof(env->dr));
4360     env->dr[6] = DR6_FIXED_1;
4361     env->dr[7] = DR7_FIXED_1;
4362     cpu_breakpoint_remove_all(s, BP_CPU);
4363     cpu_watchpoint_remove_all(s, BP_CPU);
4366     xcr0 = XSTATE_FP_MASK;
4368 #ifdef CONFIG_USER_ONLY
4369     /* Enable all the features for user-mode. */
4370     if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4371         xcr0 |= XSTATE_SSE_MASK;
4373     for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4374         const ExtSaveArea *esa = &x86_ext_save_areas[i];
4375         if (env->features[esa->feature] & esa->bits) {
4380     if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
4381         cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
4383     if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
4384         cr4 |= CR4_FSGSBASE_MASK;
4389     cpu_x86_update_cr4(env, cr4);
4392      * SDM 11.11.5 requires:
4393      *  - IA32_MTRR_DEF_TYPE MSR.E = 0
4394      *  - IA32_MTRR_PHYSMASKn.V = 0
4395      * All other bits are undefined.  For simplification, zero it all.
4397     env->mtrr_deftype = 0;
4398     memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
4399     memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
4401     env->interrupt_injected = -1;
4402     env->exception_injected = -1;
4403     env->nmi_injected = false;
4404 #if !defined(CONFIG_USER_ONLY)
4405     /* We hard-wire the BSP to the first CPU. */
4406     apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
4408     s->halted = !cpu_is_bsp(cpu);
4410     if (kvm_enabled()) {
4411         kvm_arch_reset_vcpu(cpu);
4413     else if (hvf_enabled()) {
4419 #ifndef CONFIG_USER_ONLY
/* True if this CPU is the bootstrap processor (BSP bit in the APIC base). */
4420 bool cpu_is_bsp(X86CPU *cpu)
4422     return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4425 /* TODO: remove me, when reset over QOM tree is implemented */
/* Machine-reset hook: simply resets the owning CPU. */
4426 static void x86_cpu_machine_reset_cb(void *opaque)
4428     X86CPU *cpu = opaque;
4429     cpu_reset(CPU(cpu));
/* Initialize machine-check state: only for family >= 6 CPUs that advertise
 * both MCE and MCA; enables all banks and optionally LMCE. */
4433 static void mce_init(X86CPU *cpu)
4435     CPUX86State *cenv = &cpu->env;
4438     if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4439         && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4440            (CPUID_MCE | CPUID_MCA)) {
4441         cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4442                         (cpu->enable_lmce ? MCG_LMCE_P : 0);
4443         cenv->mcg_ctl = ~(uint64_t)0;
4444         for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4445             cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4450 #ifndef CONFIG_USER_ONLY
/* Pick the APIC implementation class matching the accelerator in use. */
4451 APICCommonClass *apic_get_class(void)
4453     const char *apic_type = "apic";
4455     /* TODO: in-kernel irqchip for hvf */
4456     if (kvm_apic_in_kernel()) {
4457         apic_type = "kvm-apic";
4458     } else if (xen_enabled()) {
4459         apic_type = "xen-apic";
4462     return APIC_COMMON_CLASS(object_class_by_name(apic_type));
/* Create the local APIC device as a QOM child of @cpu and program its ID
 * and base address; realization happens later in x86_cpu_apic_realize(). */
4465 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
4467     APICCommonState *apic;
4468     ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
4470     cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
     /* The child property holds the reference; drop ours. */
4472     object_property_add_child(OBJECT(cpu), "lapic",
4473                               OBJECT(cpu->apic_state), &error_abort);
4474     object_unref(OBJECT(cpu->apic_state));
4476     qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
4477     /* TODO: convert to link<> */
4478     apic = APIC_COMMON(cpu->apic_state);
4480     apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
/* Realize the APIC created by x86_cpu_apic_create() and map its MMIO
 * region exactly once for the whole machine (all APICs share it). */
4483 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4485     APICCommonState *apic;
4486     static bool apic_mmio_map_once;
4488     if (cpu->apic_state == NULL) {
4491     object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
4494     /* Map APIC MMIO area */
4495     apic = APIC_COMMON(cpu->apic_state);
4496     if (!apic_mmio_map_once) {
4497         memory_region_add_subregion_overlap(get_system_memory(),
4499                                             MSR_IA32_APICBASE_BASE,
4502         apic_mmio_map_once = true;
/* machine-done notifier: if the machine exposes /machine/smram, alias the
 * low 4GiB of it into this CPU's address space with higher priority. */
4506 static void x86_cpu_machine_done(Notifier *n, void *unused)
4508     X86CPU *cpu = container_of(n, X86CPU, machine_done);
4509     MemoryRegion *smram =
4510         (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4513         cpu->smram = g_new(MemoryRegion, 1);
4514         memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4515                                  smram, 0, 1ull << 32);
4516         memory_region_set_enabled(cpu->smram, true);
4517         memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
/* CONFIG_USER_ONLY variant: no APIC to realize (body elided in this
 * excerpt; presumably empty — TODO confirm against full source). */
4521 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4526 /* Note: Only safe for use on x86(-64) hosts */
/* Query the host's physical address width via CPUID leaf 0x80000008,
 * falling back to 36 bits when the leaf is absent. */
4527 static uint32_t x86_host_phys_bits(void)
4530     uint32_t host_phys_bits;
4532     host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4533     if (eax >= 0x80000008) {
4534         host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4535         /* Note: According to AMD doc 25481 rev 2.34 they have a field
4536          * at 23:16 that can specify a maximum physical address bits for
4537          * the guest that can override this value; but I've not seen
4538          * anything with that set.
4540         host_phys_bits = eax & 0xff;
4542         /* It's an odd 64 bit machine that doesn't have the leaf for
4543          * physical address bits; fall back to 36 that's most older
4546         host_phys_bits = 36;
4549     return host_phys_bits;
/* Raise *@min to @value if it is currently lower (body elided in this
 * excerpt — TODO confirm against full source). */
4552 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4559 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4560 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4562     CPUX86State *env = &cpu->env;
4563     FeatureWordInfo *fi = &feature_word_info[w];
4564     uint32_t eax = fi->cpuid_eax;
     /* The top nibble of the CPUID leaf selects basic/extended/xlevel2. */
4565     uint32_t region = eax & 0xF0000000;
4567     if (!env->features[w]) {
4573         x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4576         x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4579         x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4584 /* Calculate XSAVE components based on the configured CPU feature flags */
4585 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4587     CPUX86State *env = &cpu->env;
4591     if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
     /* One mask bit per save area whose owning feature is enabled. */
4596     for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4597         const ExtSaveArea *esa = &x86_ext_save_areas[i];
4598         if (env->features[esa->feature] & esa->bits) {
4599             mask |= (1ULL << i);
     /* Split the 64-bit component mask across the two feature words. */
4603     env->features[FEAT_XSAVE_COMP_LO] = mask;
4604     env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4607 /***** Steps involved in loading and filtering CPUID data
4609 * When initializing and realizing a CPU object, the steps
4610 * involved in setting up CPUID data are:
4612 * 1) Loading CPU model definition (X86CPUDefinition). This is
4613 * implemented by x86_cpu_load_def() and should be completely
4614 * transparent, as it is done automatically by instance_init.
4615 * No code should need to look at X86CPUDefinition structs
4616 * outside instance_init.
4618 * 2) CPU expansion. This is done by realize before CPUID
4619 * filtering, and will make sure host/accelerator data is
4620 * loaded for CPU models that depend on host capabilities
4621 * (e.g. "host"). Done by x86_cpu_expand_features().
4623 * 3) CPUID filtering. This initializes extra data related to
4624 * CPUID, and checks if the host supports all capabilities
4625 * required by the CPU. Runnability of a CPU model is
4626 * determined at this step. Done by x86_cpu_filter_features().
4628 * Some operations don't require all steps to be performed.
4631 * - CPU instance creation (instance_init) will run only CPU
4632 * model loading. CPU expansion can't run at instance_init-time
4633 * because host/accelerator data may be not available yet.
4634 * - CPU realization will perform both CPU model expansion and CPUID
4635 * filtering, and return an error in case one of them fails.
4636 * - query-cpu-definitions needs to run all 3 steps. It needs
4637 * to run CPUID filtering, as the 'unavailable-features'
4638 * field is set based on the filtering results.
4639 * - The query-cpu-model-expansion QMP command only needs to run
4640 * CPU model loading and CPU expansion. It should not filter
4641 * any CPUID data based on host capabilities.
4644 /* Expand CPU configuration data, based on configured features
4645 * and host/accelerator capabilities when appropriate.
4647 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4649 CPUX86State *env = &cpu->env;
4652 Error *local_err = NULL;
4654 /*TODO: Now cpu->max_features doesn't overwrite features
4655 * set using QOM properties, and we can convert
4656 * plus_features & minus_features to global properties
4657 * inside x86_cpu_parse_featurestr() too.
4659 if (cpu->max_features) {
4660 for (w = 0; w < FEATURE_WORDS; w++) {
4661 /* Override only features that weren't set explicitly
4665 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4666 ~env->user_features[w] & \
4667 ~feature_word_info[w].no_autoenable_flags;
4671 for (l = plus_features; l; l = l->next) {
4672 const char *prop = l->data;
4673 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4679 for (l = minus_features; l; l = l->next) {
4680 const char *prop = l->data;
4681 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4687 if (!kvm_enabled() || !cpu->expose_kvm) {
4688 env->features[FEAT_KVM] = 0;
4691 x86_cpu_enable_xsave_components(cpu);
4693 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
4694 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4695 if (cpu->full_cpuid_auto_level) {
4696 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4697 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4698 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4699 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4700 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4701 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4702 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4703 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4704 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4705 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4706 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4707 /* SVM requires CPUID[0x8000000A] */
4708 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4709 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4712 /* SEV requires CPUID[0x8000001F] */
4713 if (sev_enabled()) {
4714 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4718 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4719 if (env->cpuid_level == UINT32_MAX) {
4720 env->cpuid_level = env->cpuid_min_level;
4722 if (env->cpuid_xlevel == UINT32_MAX) {
4723 env->cpuid_xlevel = env->cpuid_min_xlevel;
4725 if (env->cpuid_xlevel2 == UINT32_MAX) {
4726 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4730 if (local_err != NULL) {
4731 error_propagate(errp, local_err);
4736 * Finishes initialization of CPUID data, filters CPU feature
4737 * words based on host availability of each feature.
4739 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4741 static int x86_cpu_filter_features(X86CPU *cpu)
4743 CPUX86State *env = &cpu->env;
4747 for (w = 0; w < FEATURE_WORDS; w++) {
4748 uint32_t host_feat =
4749 x86_cpu_get_supported_feature_word(w, false);
4750 uint32_t requested_features = env->features[w];
4751 env->features[w] &= host_feat;
4752 cpu->filtered_features[w] = requested_features & ~env->features[w];
4753 if (cpu->filtered_features[w]) {
4758 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
4760 KVMState *s = CPU(cpu)->kvm_state;
4761 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
4762 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
4763 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
4764 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
4765 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
4768 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
4769 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
4770 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
4771 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
4772 INTEL_PT_ADDR_RANGES_NUM) ||
4773 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
4774 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
4775 (ecx_0 & INTEL_PT_IP_LIP)) {
4777 * Processor Trace capabilities aren't configurable, so if the
4778 * host can't emulate the capabilities we report on
4779 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
4781 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
4782 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
/* Vendor checks based on the three CPUID[0].EBX/EDX/ECX vendor words */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
4796 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4798 CPUState *cs = CPU(dev);
4799 X86CPU *cpu = X86_CPU(dev);
4800 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4801 CPUX86State *env = &cpu->env;
4802 Error *local_err = NULL;
4803 static bool ht_warned;
4805 if (xcc->host_cpuid_required) {
4806 if (!accel_uses_host_cpuid()) {
4807 char *name = x86_cpu_class_get_model_name(xcc);
4808 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4813 if (enable_cpu_pm) {
4814 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
4815 &cpu->mwait.ecx, &cpu->mwait.edx);
4816 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
4820 /* mwait extended info: needed for Core compatibility */
4821 /* We always wake on interrupt even if host does not have the capability */
4822 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
4824 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4825 error_setg(errp, "apic-id property was not initialized properly");
4829 x86_cpu_expand_features(cpu, &local_err);
4834 if (x86_cpu_filter_features(cpu) &&
4835 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4836 x86_cpu_report_filtered_features(cpu);
4837 if (cpu->enforce_cpuid) {
4838 error_setg(&local_err,
4839 accel_uses_host_cpuid() ?
4840 "Host doesn't support requested features" :
4841 "TCG doesn't support requested features");
4846 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4849 if (IS_AMD_CPU(env)) {
4850 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4851 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4852 & CPUID_EXT2_AMD_ALIASES);
4855 /* For 64bit systems think about the number of physical bits to present.
4856 * ideally this should be the same as the host; anything other than matching
4857 * the host can cause incorrect guest behaviour.
4858 * QEMU used to pick the magic value of 40 bits that corresponds to
4859 * consumer AMD devices but nothing else.
4861 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4862 if (accel_uses_host_cpuid()) {
4863 uint32_t host_phys_bits = x86_host_phys_bits();
4866 if (cpu->host_phys_bits) {
4867 /* The user asked for us to use the host physical bits */
4868 cpu->phys_bits = host_phys_bits;
4871 /* Print a warning if the user set it to a value that's not the
4874 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4876 warn_report("Host physical bits (%u)"
4877 " does not match phys-bits property (%u)",
4878 host_phys_bits, cpu->phys_bits);
4882 if (cpu->phys_bits &&
4883 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4884 cpu->phys_bits < 32)) {
4885 error_setg(errp, "phys-bits should be between 32 and %u "
4887 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
4891 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4892 error_setg(errp, "TCG only supports phys-bits=%u",
4893 TCG_PHYS_ADDR_BITS);
4897 /* 0 means it was not explicitly set by the user (or by machine
4898 * compat_props or by the host code above). In this case, the default
4899 * is the value used by TCG (40).
4901 if (cpu->phys_bits == 0) {
4902 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4905 /* For 32 bit systems don't use the user set value, but keep
4906 * phys_bits consistent with what we tell the guest.
4908 if (cpu->phys_bits != 0) {
4909 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
4913 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4914 cpu->phys_bits = 36;
4916 cpu->phys_bits = 32;
4920 /* Cache information initialization */
4921 if (!cpu->legacy_cache) {
4922 if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
4923 char *name = x86_cpu_class_get_model_name(xcc);
4925 "CPU model '%s' doesn't support legacy-cache=off", name);
4929 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
4930 *xcc->cpu_def->cache_info;
4932 /* Build legacy cache information */
4933 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
4934 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
4935 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
4936 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
4938 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
4939 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
4940 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
4941 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
4943 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
4944 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
4945 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
4946 env->cache_info_amd.l3_cache = &legacy_l3_cache;
4950 cpu_exec_realizefn(cs, &local_err);
4951 if (local_err != NULL) {
4952 error_propagate(errp, local_err);
4956 #ifndef CONFIG_USER_ONLY
4957 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
4959 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4960 x86_cpu_apic_create(cpu, &local_err);
4961 if (local_err != NULL) {
4969 #ifndef CONFIG_USER_ONLY
4970 if (tcg_enabled()) {
4971 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4972 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4974 /* Outer container... */
4975 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4976 memory_region_set_enabled(cpu->cpu_as_root, true);
4978 /* ... with two regions inside: normal system memory with low
4981 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4982 get_system_memory(), 0, ~0ull);
4983 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4984 memory_region_set_enabled(cpu->cpu_as_mem, true);
4987 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4988 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4990 /* ... SMRAM with higher priority, linked from /machine/smram. */
4991 cpu->machine_done.notify = x86_cpu_machine_done;
4992 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4999 * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
5000 * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
5001 * based on inputs (sockets,cores,threads), it is still better to give
5004 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
5005 * cs->nr_threads hasn't be populated yet and the checking is incorrect.
5007 if (IS_AMD_CPU(env) &&
5008 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
5009 cs->nr_threads > 1 && !ht_warned) {
5010 error_report("This family of AMD CPU doesn't support "
5011 "hyperthreading(%d). Please configure -smp "
5012 "options properly or try enabling topoext feature.",
5017 x86_cpu_apic_realize(cpu, &local_err);
5018 if (local_err != NULL) {
5023 xcc->parent_realize(dev, &local_err);
5026 if (local_err != NULL) {
5027 error_propagate(errp, local_err);
5032 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
5034 X86CPU *cpu = X86_CPU(dev);
5035 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5036 Error *local_err = NULL;
5038 #ifndef CONFIG_USER_ONLY
5039 cpu_remove_sync(CPU(dev));
5040 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
5043 if (cpu->apic_state) {
5044 object_unparent(OBJECT(cpu->apic_state));
5045 cpu->apic_state = NULL;
5048 xcc->parent_unrealize(dev, &local_err);
5049 if (local_err != NULL) {
5050 error_propagate(errp, local_err);
5055 typedef struct BitProperty {
5060 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5061 void *opaque, Error **errp)
5063 X86CPU *cpu = X86_CPU(obj);
5064 BitProperty *fp = opaque;
5065 uint32_t f = cpu->env.features[fp->w];
5066 bool value = (f & fp->mask) == fp->mask;
5067 visit_type_bool(v, name, &value, errp);
5070 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5071 void *opaque, Error **errp)
5073 DeviceState *dev = DEVICE(obj);
5074 X86CPU *cpu = X86_CPU(obj);
5075 BitProperty *fp = opaque;
5076 Error *local_err = NULL;
5079 if (dev->realized) {
5080 qdev_prop_set_after_realize(dev, name, errp);
5084 visit_type_bool(v, name, &value, &local_err);
5086 error_propagate(errp, local_err);
5091 cpu->env.features[fp->w] |= fp->mask;
5093 cpu->env.features[fp->w] &= ~fp->mask;
5095 cpu->env.user_features[fp->w] |= fp->mask;
5098 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5101 BitProperty *prop = opaque;
5105 /* Register a boolean property to get/set a single bit in a uint32_t field.
5107 * The same property name can be registered multiple times to make it affect
5108 * multiple bits in the same FeatureWord. In that case, the getter will return
5109 * true only if all bits are set.
5111 static void x86_cpu_register_bit_prop(X86CPU *cpu,
5112 const char *prop_name,
5118 uint32_t mask = (1UL << bitnr);
5120 op = object_property_find(OBJECT(cpu), prop_name, NULL);
5126 fp = g_new0(BitProperty, 1);
5129 object_property_add(OBJECT(cpu), prop_name, "bool",
5130 x86_cpu_get_bit_prop,
5131 x86_cpu_set_bit_prop,
5132 x86_cpu_release_bit_prop, fp, &error_abort);
5136 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5140 FeatureWordInfo *fi = &feature_word_info[w];
5141 const char *name = fi->feat_names[bitnr];
5147 /* Property names should use "-" instead of "_".
5148 * Old names containing underscores are registered as aliases
5149 * using object_property_add_alias()
5151 assert(!strchr(name, '_'));
5152 /* aliases don't use "|" delimiters anymore, they are registered
5153 * manually using object_property_add_alias() */
5154 assert(!strchr(name, '|'));
5155 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5158 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5160 X86CPU *cpu = X86_CPU(cs);
5161 CPUX86State *env = &cpu->env;
5162 GuestPanicInformation *panic_info = NULL;
5164 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5165 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5167 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
5169 assert(HV_CRASH_PARAMS >= 5);
5170 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5171 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5172 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5173 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5174 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5179 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5180 const char *name, void *opaque,
5183 CPUState *cs = CPU(obj);
5184 GuestPanicInformation *panic_info;
5186 if (!cs->crash_occurred) {
5187 error_setg(errp, "No crash occured");
5191 panic_info = x86_cpu_get_crash_info(cs);
5192 if (panic_info == NULL) {
5193 error_setg(errp, "No crash information");
5197 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5199 qapi_free_GuestPanicInformation(panic_info);
5202 static void x86_cpu_initfn(Object *obj)
5204 CPUState *cs = CPU(obj);
5205 X86CPU *cpu = X86_CPU(obj);
5206 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
5207 CPUX86State *env = &cpu->env;
5212 object_property_add(obj, "family", "int",
5213 x86_cpuid_version_get_family,
5214 x86_cpuid_version_set_family, NULL, NULL, NULL);
5215 object_property_add(obj, "model", "int",
5216 x86_cpuid_version_get_model,
5217 x86_cpuid_version_set_model, NULL, NULL, NULL);
5218 object_property_add(obj, "stepping", "int",
5219 x86_cpuid_version_get_stepping,
5220 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
5221 object_property_add_str(obj, "vendor",
5222 x86_cpuid_get_vendor,
5223 x86_cpuid_set_vendor, NULL);
5224 object_property_add_str(obj, "model-id",
5225 x86_cpuid_get_model_id,
5226 x86_cpuid_set_model_id, NULL);
5227 object_property_add(obj, "tsc-frequency", "int",
5228 x86_cpuid_get_tsc_freq,
5229 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
5230 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
5231 x86_cpu_get_feature_words,
5232 NULL, NULL, (void *)env->features, NULL);
5233 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
5234 x86_cpu_get_feature_words,
5235 NULL, NULL, (void *)cpu->filtered_features, NULL);
5237 object_property_add(obj, "crash-information", "GuestPanicInformation",
5238 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
5240 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
5242 for (w = 0; w < FEATURE_WORDS; w++) {
5245 for (bitnr = 0; bitnr < 32; bitnr++) {
5246 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
5250 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
5251 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
5252 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
5253 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
5254 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
5255 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
5256 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
5258 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
5259 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
5260 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
5261 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
5262 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
5263 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
5264 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
5265 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
5266 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
5267 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
5268 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
5269 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
5270 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
5271 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
5272 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
5273 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
5274 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
5275 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
5276 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
5277 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
5278 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
5281 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
5285 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5287 X86CPU *cpu = X86_CPU(cs);
5289 return cpu->apic_id;
5292 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5294 X86CPU *cpu = X86_CPU(cs);
5296 return cpu->env.cr[0] & CR0_PG_MASK;
5299 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5301 X86CPU *cpu = X86_CPU(cs);
5303 cpu->env.eip = value;
5306 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5308 X86CPU *cpu = X86_CPU(cs);
5310 cpu->env.eip = tb->pc - tb->cs_base;
5313 static bool x86_cpu_has_work(CPUState *cs)
5315 X86CPU *cpu = X86_CPU(cs);
5316 CPUX86State *env = &cpu->env;
5318 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
5319 CPU_INTERRUPT_POLL)) &&
5320 (env->eflags & IF_MASK)) ||
5321 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
5322 CPU_INTERRUPT_INIT |
5323 CPU_INTERRUPT_SIPI |
5324 CPU_INTERRUPT_MCE)) ||
5325 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
5326 !(env->hflags & HF_SMM_MASK));
5329 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5331 X86CPU *cpu = X86_CPU(cs);
5332 CPUX86State *env = &cpu->env;
5334 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5335 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5336 : bfd_mach_i386_i8086);
5337 info->print_insn = print_insn_i386;
5339 info->cap_arch = CS_ARCH_X86;
5340 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5341 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5343 info->cap_insn_unit = 1;
5344 info->cap_insn_split = 8;
5347 void x86_update_hflags(CPUX86State *env)
5350 #define HFLAG_COPY_MASK \
5351 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
5352 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
5353 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
5354 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
5356 hflags = env->hflags & HFLAG_COPY_MASK;
5357 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
5358 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
5359 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
5360 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
5361 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
5363 if (env->cr[4] & CR4_OSFXSR_MASK) {
5364 hflags |= HF_OSFXSR_MASK;
5367 if (env->efer & MSR_EFER_LMA) {
5368 hflags |= HF_LMA_MASK;
5371 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
5372 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
5374 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
5375 (DESC_B_SHIFT - HF_CS32_SHIFT);
5376 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
5377 (DESC_B_SHIFT - HF_SS32_SHIFT);
5378 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
5379 !(hflags & HF_CS32_MASK)) {
5380 hflags |= HF_ADDSEG_MASK;
5382 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
5383 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
5386 env->hflags = hflags;
5389 static Property x86_cpu_properties[] = {
5390 #ifdef CONFIG_USER_ONLY
5391 /* apic_id = 0 by default for *-user, see commit 9886e834 */
5392 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
5393 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
5394 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
5395 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
5397 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
5398 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
5399 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
5400 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
5402 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
5403 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
5404 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
5405 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
5406 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
5407 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
5408 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
5409 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
5410 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
5411 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
5412 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
5413 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
5414 DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
5415 DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
5416 DEFINE_PROP_BOOL("hv-tlbflush", X86CPU, hyperv_tlbflush, false),
5417 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
5418 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
5419 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
5420 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
5421 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
5422 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
5423 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
5424 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
5425 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
5426 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
5427 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
5428 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
5429 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
5430 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
5431 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
5432 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
5433 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
5434 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
5436 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
5437 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
5439 * lecacy_cache defaults to true unless the CPU model provides its
5440 * own cache information (see x86_cpu_load_def()).
5442 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
5445 * From "Requirements for Implementing the Microsoft
5446 * Hypervisor Interface":
5447 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
5449 * "Starting with Windows Server 2012 and Windows 8, if
5450 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
5451 * the hypervisor imposes no specific limit to the number of VPs.
5452 * In this case, Windows Server 2012 guest VMs may use more than
5453 * 64 VPs, up to the maximum supported number of processors applicable
5454 * to the specific Windows version being used."
5456 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
5457 DEFINE_PROP_END_OF_LIST()
5460 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
5462 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5463 CPUClass *cc = CPU_CLASS(oc);
5464 DeviceClass *dc = DEVICE_CLASS(oc);
5466 device_class_set_parent_realize(dc, x86_cpu_realizefn,
5467 &xcc->parent_realize);
5468 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
5469 &xcc->parent_unrealize);
5470 dc->props = x86_cpu_properties;
5472 xcc->parent_reset = cc->reset;
5473 cc->reset = x86_cpu_reset;
5474 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
5476 cc->class_by_name = x86_cpu_class_by_name;
5477 cc->parse_features = x86_cpu_parse_featurestr;
5478 cc->has_work = x86_cpu_has_work;
5480 cc->do_interrupt = x86_cpu_do_interrupt;
5481 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
5483 cc->dump_state = x86_cpu_dump_state;
5484 cc->get_crash_info = x86_cpu_get_crash_info;
5485 cc->set_pc = x86_cpu_set_pc;
5486 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
5487 cc->gdb_read_register = x86_cpu_gdb_read_register;
5488 cc->gdb_write_register = x86_cpu_gdb_write_register;
5489 cc->get_arch_id = x86_cpu_get_arch_id;
5490 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
5491 #ifdef CONFIG_USER_ONLY
5492 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
5494 cc->asidx_from_attrs = x86_asidx_from_attrs;
5495 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
5496 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
5497 cc->write_elf64_note = x86_cpu_write_elf64_note;
5498 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
5499 cc->write_elf32_note = x86_cpu_write_elf32_note;
5500 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
5501 cc->vmsd = &vmstate_x86_cpu;
5503 cc->gdb_arch_name = x86_gdb_arch_name;
5504 #ifdef TARGET_X86_64
5505 cc->gdb_core_xml_file = "i386-64bit.xml";
5506 cc->gdb_num_core_regs = 57;
5508 cc->gdb_core_xml_file = "i386-32bit.xml";
5509 cc->gdb_num_core_regs = 41;
5511 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
5512 cc->debug_excp_handler = breakpoint_handler;
5514 cc->cpu_exec_enter = x86_cpu_exec_enter;
5515 cc->cpu_exec_exit = x86_cpu_exec_exit;
5517 cc->tcg_initialize = tcg_x86_init;
5519 cc->disas_set_info = x86_disas_set_info;
5521 dc->user_creatable = true;
5524 static const TypeInfo x86_cpu_type_info = {
5525 .name = TYPE_X86_CPU,
5527 .instance_size = sizeof(X86CPU),
5528 .instance_init = x86_cpu_initfn,
5530 .class_size = sizeof(X86CPUClass),
5531 .class_init = x86_cpu_common_class_init,
5535 /* "base" CPU model, used by query-cpu-model-expansion */
5536 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
5538 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5540 xcc->static_model = true;
5541 xcc->migration_safe = true;
5542 xcc->model_description = "base CPU model type with no features enabled";
5546 static const TypeInfo x86_base_cpu_type_info = {
5547 .name = X86_CPU_TYPE_NAME("base"),
5548 .parent = TYPE_X86_CPU,
5549 .class_init = x86_cpu_base_class_init,
5552 static void x86_cpu_register_types(void)
5556 type_register_static(&x86_cpu_type_info);
5557 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5558 x86_register_cpudef_type(&builtin_x86_defs[i]);
5560 type_register_static(&max_x86_cpu_type_info);
5561 type_register_static(&x86_base_cpu_type_info);
5562 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5563 type_register_static(&host_x86_cpu_type_info);
5567 type_init(x86_cpu_register_types)