2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/bitops.h"
25 #include "exec/exec-all.h"
26 #include "sysemu/kvm.h"
27 #include "sysemu/hvf.h"
28 #include "sysemu/cpus.h"
32 #include "qemu/error-report.h"
33 #include "qemu/option.h"
34 #include "qemu/config-file.h"
35 #include "qapi/error.h"
36 #include "qapi/qapi-visit-misc.h"
37 #include "qapi/qapi-visit-run-state.h"
38 #include "qapi/qmp/qdict.h"
39 #include "qapi/qmp/qerror.h"
40 #include "qapi/visitor.h"
41 #include "qom/qom-qobject.h"
42 #include "sysemu/arch_init.h"
44 #include "standard-headers/asm-x86/kvm_para.h"
46 #include "sysemu/sysemu.h"
47 #include "hw/qdev-properties.h"
48 #include "hw/i386/topology.h"
49 #ifndef CONFIG_USER_ONLY
50 #include "exec/address-spaces.h"
52 #include "hw/xen/xen.h"
53 #include "hw/i386/apic_internal.h"
56 #include "disas/capstone.h"
58 /* Helpers for building CPUID[2] descriptors: */
60 struct CPUID2CacheDescriptorInfo {
69 #define MiB (1024 * 1024)
72 * Known CPUID 2 cache descriptors.
73 * From Intel SDM Volume 2A, CPUID instruction
75 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
76 [0x06] = { .level = 1, .type = ICACHE, .size = 8 * KiB,
77 .associativity = 4, .line_size = 32, },
78 [0x08] = { .level = 1, .type = ICACHE, .size = 16 * KiB,
79 .associativity = 4, .line_size = 32, },
80 [0x09] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
81 .associativity = 4, .line_size = 64, },
82 [0x0A] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
83 .associativity = 2, .line_size = 32, },
84 [0x0C] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
85 .associativity = 4, .line_size = 32, },
86 [0x0D] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
87 .associativity = 4, .line_size = 64, },
88 [0x0E] = { .level = 1, .type = DCACHE, .size = 24 * KiB,
89 .associativity = 6, .line_size = 64, },
90 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
91 .associativity = 2, .line_size = 64, },
92 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
93 .associativity = 8, .line_size = 64, },
94 /* lines per sector is not supported by cpuid2_cache_descriptor(),
95 * so descriptors 0x22 and 0x23 are not included
97 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
98 .associativity = 16, .line_size = 64, },
99 /* lines per sector is not supported by cpuid2_cache_descriptor(),
100 * so descriptors 0x25 and 0x20 are not included
102 [0x2C] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
103 .associativity = 8, .line_size = 64, },
104 [0x30] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
105 .associativity = 8, .line_size = 64, },
106 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
107 .associativity = 4, .line_size = 32, },
108 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
109 .associativity = 4, .line_size = 32, },
110 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
111 .associativity = 4, .line_size = 32, },
112 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
113 .associativity = 4, .line_size = 32, },
114 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
115 .associativity = 4, .line_size = 32, },
116 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
117 .associativity = 4, .line_size = 64, },
118 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
119 .associativity = 8, .line_size = 64, },
120 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
121 .associativity = 12, .line_size = 64, },
122 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
123 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
124 .associativity = 12, .line_size = 64, },
125 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
126 .associativity = 16, .line_size = 64, },
127 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
128 .associativity = 12, .line_size = 64, },
129 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
130 .associativity = 16, .line_size = 64, },
131 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
132 .associativity = 24, .line_size = 64, },
133 [0x60] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
134 .associativity = 8, .line_size = 64, },
135 [0x66] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
136 .associativity = 4, .line_size = 64, },
137 [0x67] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
138 .associativity = 4, .line_size = 64, },
139 [0x68] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
140 .associativity = 4, .line_size = 64, },
141 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
142 .associativity = 4, .line_size = 64, },
143 /* lines per sector is not supported by cpuid2_cache_descriptor(),
144 * so descriptors 0x79, 0x7A, 0x7B, and 0x7C are not included.
146 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
147 .associativity = 8, .line_size = 64, },
148 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
149 .associativity = 2, .line_size = 64, },
150 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
151 .associativity = 8, .line_size = 64, },
152 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
153 .associativity = 8, .line_size = 32, },
154 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
155 .associativity = 8, .line_size = 32, },
156 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
157 .associativity = 8, .line_size = 32, },
158 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
159 .associativity = 8, .line_size = 32, },
160 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
161 .associativity = 4, .line_size = 64, },
162 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
163 .associativity = 8, .line_size = 64, },
164 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
165 .associativity = 4, .line_size = 64, },
166 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
167 .associativity = 4, .line_size = 64, },
168 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
169 .associativity = 4, .line_size = 64, },
170 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
171 .associativity = 8, .line_size = 64, },
172 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
173 .associativity = 8, .line_size = 64, },
174 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
175 .associativity = 8, .line_size = 64, },
176 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
177 .associativity = 12, .line_size = 64, },
178 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
179 .associativity = 12, .line_size = 64, },
180 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
181 .associativity = 12, .line_size = 64, },
182 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
183 .associativity = 16, .line_size = 64, },
184 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
185 .associativity = 16, .line_size = 64, },
186 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
187 .associativity = 16, .line_size = 64, },
188 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
189 .associativity = 24, .line_size = 64, },
190 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
191 .associativity = 24, .line_size = 64, },
192 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
193 .associativity = 24, .line_size = 64, },
197 * "CPUID leaf 2 does not report cache descriptor information,
198 * use CPUID leaf 4 to query cache parameters"
200 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
203 * Return a CPUID 2 cache descriptor for a given cache.
204 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
206 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
210 assert(cache->size > 0);
211 assert(cache->level > 0);
212 assert(cache->line_size > 0);
213 assert(cache->associativity > 0);
214 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
215 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
216 if (d->level == cache->level && d->type == cache->type &&
217 d->size == cache->size && d->line_size == cache->line_size &&
218 d->associativity == cache->associativity) {
223 return CACHE_DESCRIPTOR_UNAVAILABLE;
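/*
 * Usage sketch (a hypothetical caller, for illustration only; the values are
 * taken from the descriptor table above):
 *
 *     CPUCacheInfo l1d = {
 *         .type = DCACHE, .level = 1, .size = 32 * KiB,
 *         .associativity = 8, .line_size = 64,
 *     };
 *     uint8_t desc = cpuid2_cache_descriptor(&l1d); // 0x2C per entry [0x2C]
 *
 * A cache with no matching table entry falls through the loop and yields
 * CACHE_DESCRIPTOR_UNAVAILABLE (0xFF).
 */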
226 /* CPUID Leaf 4 constants: */
229 #define CACHE_TYPE_D 1
230 #define CACHE_TYPE_I 2
231 #define CACHE_TYPE_UNIFIED 3
233 #define CACHE_LEVEL(l) (l << 5)
235 #define CACHE_SELF_INIT_LEVEL (1 << 8)
238 #define CACHE_NO_INVD_SHARING (1 << 0)
239 #define CACHE_INCLUSIVE (1 << 1)
240 #define CACHE_COMPLEX_IDX (1 << 2)
242 /* Encode CacheType for CPUID[4].EAX */
243 #define CACHE_TYPE(t) (((t) == DCACHE) ? CACHE_TYPE_D : \
244 ((t) == ICACHE) ? CACHE_TYPE_I : \
245 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
246 0 /* Invalid value */)
249 /* Encode cache info for CPUID[4] */
250 static void encode_cache_cpuid4(CPUCacheInfo *cache,
251 int num_apic_ids, int num_cores,
252 uint32_t *eax, uint32_t *ebx,
253 uint32_t *ecx, uint32_t *edx)
255 assert(cache->size == cache->line_size * cache->associativity *
256 cache->partitions * cache->sets);
258 assert(num_apic_ids > 0);
259 *eax = CACHE_TYPE(cache->type) |
260 CACHE_LEVEL(cache->level) |
261 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
262 ((num_cores - 1) << 26) |
263 ((num_apic_ids - 1) << 14);
265 assert(cache->line_size > 0);
266 assert(cache->partitions > 0);
267 assert(cache->associativity > 0);
268 /* We don't implement fully-associative caches */
269 assert(cache->associativity < cache->sets);
270 *ebx = (cache->line_size - 1) |
271 ((cache->partitions - 1) << 12) |
272 ((cache->associativity - 1) << 22);
274 assert(cache->sets > 0);
275 *ecx = cache->sets - 1;
277 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
278 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
279 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
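/*
 * Worked example (a sketch with assumed parameters): a self-initializing
 * 32 KiB L1 data cache with 64-byte lines, 8-way associativity, 1 partition
 * and 64 sets, reported with num_cores = 1 and num_apic_ids = 1, encodes as
 *   EAX = CACHE_TYPE_D | CACHE_LEVEL(1) | CACHE_SELF_INIT_LEVEL = 0x121
 *   EBX = (64 - 1) | ((1 - 1) << 12) | ((8 - 1) << 22)          = 0x01c0003f
 *   ECX = 64 - 1                                                = 0x3f
 *   EDX = whichever of the flag bits above apply
 */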
282 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
283 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
285 assert(cache->size % 1024 == 0);
286 assert(cache->lines_per_tag > 0);
287 assert(cache->associativity > 0);
288 assert(cache->line_size > 0);
289 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
290 (cache->lines_per_tag << 8) | (cache->line_size);
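/*
 * Worked example (a sketch with assumed cache parameters): a 64 KiB, 2-way
 * cache with 1 line per tag and a 64-byte line size encodes as
 *   (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140
 */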
293 #define ASSOC_FULL 0xFF
295 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
296 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
306 a == ASSOC_FULL ? 0xF : \
307 0 /* invalid value */)
310 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
313 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
315 uint32_t *ecx, uint32_t *edx)
317 assert(l2->size % 1024 == 0);
318 assert(l2->associativity > 0);
319 assert(l2->lines_per_tag > 0);
320 assert(l2->line_size > 0);
321 *ecx = ((l2->size / 1024) << 16) |
322 (AMD_ENC_ASSOC(l2->associativity) << 12) |
323 (l2->lines_per_tag << 8) | (l2->line_size);
326 assert(l3->size % (512 * 1024) == 0);
327 assert(l3->associativity > 0);
328 assert(l3->lines_per_tag > 0);
329 assert(l3->line_size > 0);
330 *edx = ((l3->size / (512 * 1024)) << 18) |
331 (AMD_ENC_ASSOC(l3->associativity) << 12) |
332 (l3->lines_per_tag << 8) | (l3->line_size);
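/*
 * Worked example (a sketch; it assumes the usual AMD leaf 0x80000006
 * associativity encoding, where AMD_ENC_ASSOC(16) == 0x8): a 16 MiB, 16-way
 * L3 with 1 line per tag and 64-byte lines encodes as
 *   EDX = ((16 * MiB / (512 * KiB)) << 18) | (0x8 << 12) | (1 << 8) | 64
 *       = (32 << 18) | 0x8140 = 0x00808140
 */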
339 * Definitions used for building CPUID Leaves 0x8000001D and 0x8000001E.
340 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
341 * These constants are used to build the CPU topology. Right now, the
342 * TOPOEXT feature is enabled only on EPYC, so they are based on
343 * EPYC-supported configurations. We may need to revisit them if
344 * these values change in the future.
346 /* Maximum core complexes in a node */
348 /* Maximum cores in a core complex */
349 #define MAX_CORES_IN_CCX 4
350 /* Maximum cores in a node */
351 #define MAX_CORES_IN_NODE 8
352 /* Maximum nodes in a socket */
353 #define MAX_NODES_PER_SOCKET 4
356 * Figure out the number of nodes required to build this config.
357 * The maximum number of cores in a node is 8 (MAX_CORES_IN_NODE).
359 static int nodes_in_socket(int nr_cores)
363 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
365 /* Hardware does not support config with 3 nodes, return 4 in that case */
366 return (nodes == 3) ? 4 : nodes;
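/*
 * For example, nr_cores = 24 gives DIV_ROUND_UP(24, 8) = 3, which is then
 * rounded up to 4 because hardware does not support a 3-node config.
 */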
370 * Decide the number of cores in a core complex for the given nr_cores using
371 * the constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
372 * MAX_NODES_PER_SOCKET, maintaining symmetry as much as possible.
373 * The L3 cache is shared across all cores in a core complex, so this also
374 * tells us how many cores share the L3 cache.
376 static int cores_in_core_complex(int nr_cores)
380 /* Check if we can fit all the cores in one core complex */
381 if (nr_cores <= MAX_CORES_IN_CCX) {
384 /* Get the number of nodes required to build this config */
385 nodes = nodes_in_socket(nr_cores);
388 * Divide the cores across all the core complexes
389 * and return the rounded-up value.
391 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
394 /* Encode cache info for CPUID[8000001D] */
395 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
396 uint32_t *eax, uint32_t *ebx,
397 uint32_t *ecx, uint32_t *edx)
400 assert(cache->size == cache->line_size * cache->associativity *
401 cache->partitions * cache->sets);
403 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
404 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
406 /* L3 is shared among multiple cores */
407 if (cache->level == 3) {
408 l3_cores = cores_in_core_complex(cs->nr_cores);
409 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
411 *eax |= ((cs->nr_threads - 1) << 14);
414 assert(cache->line_size > 0);
415 assert(cache->partitions > 0);
416 assert(cache->associativity > 0);
417 /* We don't implement fully-associative caches */
418 assert(cache->associativity < cache->sets);
419 *ebx = (cache->line_size - 1) |
420 ((cache->partitions - 1) << 12) |
421 ((cache->associativity - 1) << 22);
423 assert(cache->sets > 0);
424 *ecx = cache->sets - 1;
426 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
427 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
428 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
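/*
 * Example of the sharing field (a sketch with assumed values): for an L3
 * shared by a 4-core core complex with 2 threads per core, EAX[25:14] holds
 * (4 * 2) - 1 = 7, i.e. EAX gets 7 << 14 = 0x1c000 ORed in.
 */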
431 /* Data structure to hold the configuration info for a given core index */
432 struct core_topology {
433 /* core complex id of the current core index */
436 * Adjusted core index for this core in the topology
437 * This can be 0, 1, 2 or 3, with at most 4 cores in a core complex
440 /* Node id for this core index */
442 /* Number of nodes in this config */
447 * Build a configuration that closely matches the EPYC hardware, using the
448 * EPYC hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX,
449 * MAX_CORES_IN_NODE) for now. This could change in the future.
450 * nr_cores : Total number of cores in the config
451 * core_id : Core index of the current CPU
452 * topo : Data structure to hold all the config info for this core index
454 static void build_core_topology(int nr_cores, int core_id,
455 struct core_topology *topo)
457 int nodes, cores_in_ccx;
459 /* First get the number of nodes required */
460 nodes = nodes_in_socket(nr_cores);
462 cores_in_ccx = cores_in_core_complex(nr_cores);
464 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
465 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
466 topo->core_id = core_id % cores_in_ccx;
467 topo->num_nodes = nodes;
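/*
 * Worked example (a sketch assuming the EPYC-style constants, e.g.
 * MAX_CCX == 2, which makes cores_in_ccx == 4 for this config):
 * nr_cores = 16 and core_id = 10 give
 *   node_id = 10 / (4 * 2) = 1, ccx_id = (10 % 8) / 4 = 0,
 *   core_id = 10 % 4 = 2, num_nodes = 2.
 */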
470 /* Encode topology info for CPUID[8000001E] */
471 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
472 uint32_t *eax, uint32_t *ebx,
473 uint32_t *ecx, uint32_t *edx)
475 struct core_topology topo = {0};
479 build_core_topology(cs->nr_cores, cpu->core_id, &topo);
482 * CPUID_Fn8000001E_EBX
484 * 15:8 Threads per core (The number of threads per core is
485 * Threads per core + 1)
486 * 7:0 Core id (see bit decoding below)
496 if (cs->nr_threads - 1) {
497 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
498 (topo.ccx_id << 2) | topo.core_id;
500 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
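/*
 * Worked example (illustrative): with 2 threads per core and a core whose
 * topology is node_id = 1, ccx_id = 0, core_id = 2, EBX becomes
 * ((2 - 1) << 8) | (1 << 3) | (0 << 2) | 2 = 0x10a.
 */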
503 * CPUID_Fn8000001E_ECX
505 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
506 * 7:0 Node id (see bit decoding below)
510 if (topo.num_nodes <= 4) {
511 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
515 * Node id fix-up. Actual hardware supports up to 4 nodes, but with
516 * more than 32 cores we may end up with more than 4 nodes.
517 * The node id is a combination of socket id and node id. The only
518 * requirement here is that this number be unique across the system.
519 * Shift the socket id to accommodate more nodes. We don't expect both
520 * the socket id and the node id to be large at the same time. This is
521 * not an ideal config, but we need to support it. The maximum number
522 * of nodes is 32 (255/8) with 8 cores per node and 255 cores maximum,
523 * so we only need 5 bits for the node count. Find the leftmost set bit:
524 * find_last_bit() returns the last set bit (0-based). Left-shift (+1)
525 * the socket id to make room for all the nodes.
527 nodes = topo.num_nodes - 1;
528 shift = find_last_bit(&nodes, 8);
529 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
536 * Definitions of the hardcoded cache entries we expose:
537 * These are legacy cache values. If any of these values needs to
538 * change, please use builtin_x86_defs instead.
542 static CPUCacheInfo legacy_l1d_cache = {
551 .no_invd_sharing = true,
554 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
555 static CPUCacheInfo legacy_l1d_cache_amd = {
565 .no_invd_sharing = true,
568 /* L1 instruction cache: */
569 static CPUCacheInfo legacy_l1i_cache = {
578 .no_invd_sharing = true,
581 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
582 static CPUCacheInfo legacy_l1i_cache_amd = {
592 .no_invd_sharing = true,
595 /* Level 2 unified cache: */
596 static CPUCacheInfo legacy_l2_cache = {
597 .type = UNIFIED_CACHE,
605 .no_invd_sharing = true,
608 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
609 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
610 .type = UNIFIED_CACHE,
618 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
619 static CPUCacheInfo legacy_l2_cache_amd = {
620 .type = UNIFIED_CACHE,
630 /* Level 3 unified cache: */
631 static CPUCacheInfo legacy_l3_cache = {
632 .type = UNIFIED_CACHE,
642 .complex_indexing = true,
645 /* TLB definitions: */
647 #define L1_DTLB_2M_ASSOC 1
648 #define L1_DTLB_2M_ENTRIES 255
649 #define L1_DTLB_4K_ASSOC 1
650 #define L1_DTLB_4K_ENTRIES 255
652 #define L1_ITLB_2M_ASSOC 1
653 #define L1_ITLB_2M_ENTRIES 255
654 #define L1_ITLB_4K_ASSOC 1
655 #define L1_ITLB_4K_ENTRIES 255
657 #define L2_DTLB_2M_ASSOC 0 /* disabled */
658 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
659 #define L2_DTLB_4K_ASSOC 4
660 #define L2_DTLB_4K_ENTRIES 512
662 #define L2_ITLB_2M_ASSOC 0 /* disabled */
663 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
664 #define L2_ITLB_4K_ASSOC 4
665 #define L2_ITLB_4K_ENTRIES 512
667 /* CPUID Leaf 0x14 constants: */
668 #define INTEL_PT_MAX_SUBLEAF 0x1
670 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
671 * MSR can be accessed;
672 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
673 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
674 * of Intel PT MSRs across warm reset;
675 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
677 #define INTEL_PT_MINIMAL_EBX 0xf
679 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
680 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
682 * bit[01]: ToPA tables can hold any number of output entries, up to the
683 * maximum allowed by the MaskOrTableOffset field of
684 * IA32_RTIT_OUTPUT_MASK_PTRS;
685 * bit[02]: Support Single-Range Output scheme;
687 #define INTEL_PT_MINIMAL_ECX 0x7
688 /* generated packets which contain IP payloads have LIP values */
689 #define INTEL_PT_IP_LIP (1 << 31)
690 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
691 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
692 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
693 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
694 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
696 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
697 uint32_t vendor2, uint32_t vendor3)
700 for (i = 0; i < 4; i++) {
701 dst[i] = vendor1 >> (8 * i);
702 dst[i + 4] = vendor2 >> (8 * i);
703 dst[i + 8] = vendor3 >> (8 * i);
705 dst[CPUID_VENDOR_SZ] = '\0';
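/*
 * For example, the Intel vendor words EBX = 0x756e6547, EDX = 0x49656e69 and
 * ECX = 0x6c65746e decode byte by byte (little endian) to "GenuineIntel".
 */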
708 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
709 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
710 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
711 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
712 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
713 CPUID_PSE36 | CPUID_FXSR)
714 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
715 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
716 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
717 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
718 CPUID_PAE | CPUID_SEP | CPUID_APIC)
720 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
721 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
722 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
723 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
724 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
725 /* partly implemented:
726 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
728 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
729 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
730 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
731 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
732 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
733 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
735 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
736 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
737 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
738 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
739 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
742 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
744 #define TCG_EXT2_X86_64_FEATURES 0
747 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
748 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
749 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
750 TCG_EXT2_X86_64_FEATURES)
751 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
752 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
753 #define TCG_EXT4_FEATURES 0
754 #define TCG_SVM_FEATURES 0
755 #define TCG_KVM_FEATURES 0
756 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
757 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
758 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
759 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
762 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
763 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
764 CPUID_7_0_EBX_RDSEED */
765 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
766 /* CPUID_7_0_ECX_OSPKE is dynamic */ \
768 #define TCG_7_0_EDX_FEATURES 0
769 #define TCG_APM_FEATURES 0
770 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
771 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
773 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
775 typedef struct FeatureWordInfo {
776 /* Feature flag names are taken from "Intel Processor Identification and
777 * the CPUID Instruction" and AMD's "CPUID Specification".
778 * In cases of disagreement between feature naming conventions,
779 * aliases may be added.
781 const char *feat_names[32];
782 uint32_t cpuid_eax; /* Input EAX for CPUID */
783 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
784 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
785 int cpuid_reg; /* output register (R_* constant) */
786 uint32_t tcg_features; /* Feature flags supported by TCG */
787 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
788 uint32_t migratable_flags; /* Feature flags known to be migratable */
789 /* Features that shouldn't be auto-enabled by "-cpu host" */
790 uint32_t no_autoenable_flags;
793 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
796 "fpu", "vme", "de", "pse",
797 "tsc", "msr", "pae", "mce",
798 "cx8", "apic", NULL, "sep",
799 "mtrr", "pge", "mca", "cmov",
800 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
801 NULL, "ds" /* Intel dts */, "acpi", "mmx",
802 "fxsr", "sse", "sse2", "ss",
803 "ht" /* Intel htt */, "tm", "ia64", "pbe",
805 .cpuid_eax = 1, .cpuid_reg = R_EDX,
806 .tcg_features = TCG_FEATURES,
810 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
811 "ds-cpl", "vmx", "smx", "est",
812 "tm2", "ssse3", "cid", NULL,
813 "fma", "cx16", "xtpr", "pdcm",
814 NULL, "pcid", "dca", "sse4.1",
815 "sse4.2", "x2apic", "movbe", "popcnt",
816 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
817 "avx", "f16c", "rdrand", "hypervisor",
819 .cpuid_eax = 1, .cpuid_reg = R_ECX,
820 .tcg_features = TCG_EXT_FEATURES,
822 /* Feature names that are already defined in feature_name[] but
823 * are set in CPUID[8000_0001].EDX on AMD CPUs don't have their
824 * names listed in feat_names below. They are copied automatically
825 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
827 [FEAT_8000_0001_EDX] = {
829 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
830 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
831 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
832 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
833 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
834 "nx", NULL, "mmxext", NULL /* mmx */,
835 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
836 NULL, "lm", "3dnowext", "3dnow",
838 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
839 .tcg_features = TCG_EXT2_FEATURES,
841 [FEAT_8000_0001_ECX] = {
843 "lahf-lm", "cmp-legacy", "svm", "extapic",
844 "cr8legacy", "abm", "sse4a", "misalignsse",
845 "3dnowprefetch", "osvw", "ibs", "xop",
846 "skinit", "wdt", NULL, "lwp",
847 "fma4", "tce", NULL, "nodeid-msr",
848 NULL, "tbm", "topoext", "perfctr-core",
849 "perfctr-nb", NULL, NULL, NULL,
850 NULL, NULL, NULL, NULL,
852 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
853 .tcg_features = TCG_EXT3_FEATURES,
855 [FEAT_C000_0001_EDX] = {
857 NULL, NULL, "xstore", "xstore-en",
858 NULL, NULL, "xcrypt", "xcrypt-en",
859 "ace2", "ace2-en", "phe", "phe-en",
860 "pmm", "pmm-en", NULL, NULL,
861 NULL, NULL, NULL, NULL,
862 NULL, NULL, NULL, NULL,
863 NULL, NULL, NULL, NULL,
864 NULL, NULL, NULL, NULL,
866 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
867 .tcg_features = TCG_EXT4_FEATURES,
871 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
872 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
873 NULL, "kvm-pv-tlb-flush", NULL, NULL,
874 NULL, NULL, NULL, NULL,
875 NULL, NULL, NULL, NULL,
876 NULL, NULL, NULL, NULL,
877 "kvmclock-stable-bit", NULL, NULL, NULL,
878 NULL, NULL, NULL, NULL,
880 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
881 .tcg_features = TCG_KVM_FEATURES,
885 "kvm-hint-dedicated", NULL, NULL, NULL,
886 NULL, NULL, NULL, NULL,
887 NULL, NULL, NULL, NULL,
888 NULL, NULL, NULL, NULL,
889 NULL, NULL, NULL, NULL,
890 NULL, NULL, NULL, NULL,
891 NULL, NULL, NULL, NULL,
892 NULL, NULL, NULL, NULL,
894 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
895 .tcg_features = TCG_KVM_FEATURES,
897 * KVM hints aren't auto-enabled by -cpu host; they need to be
898 * explicitly enabled on the command line.
900 .no_autoenable_flags = ~0U,
902 [FEAT_HYPERV_EAX] = {
904 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
905 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
906 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
907 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
908 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
909 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
910 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
912 NULL, NULL, NULL, NULL,
913 NULL, NULL, NULL, NULL,
914 NULL, NULL, NULL, NULL,
915 NULL, NULL, NULL, NULL,
917 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
919 [FEAT_HYPERV_EBX] = {
921 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
922 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
923 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
924 NULL /* hv_create_port */, NULL /* hv_connect_port */,
925 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
926 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
928 NULL, NULL, NULL, NULL,
929 NULL, NULL, NULL, NULL,
930 NULL, NULL, NULL, NULL,
931 NULL, NULL, NULL, NULL,
933 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
935 [FEAT_HYPERV_EDX] = {
937 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
938 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
939 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
941 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
942 NULL, NULL, NULL, NULL,
943 NULL, NULL, NULL, NULL,
944 NULL, NULL, NULL, NULL,
945 NULL, NULL, NULL, NULL,
946 NULL, NULL, NULL, NULL,
948 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
952 "npt", "lbrv", "svm-lock", "nrip-save",
953 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
954 NULL, NULL, "pause-filter", NULL,
955 "pfthreshold", NULL, NULL, NULL,
956 NULL, NULL, NULL, NULL,
957 NULL, NULL, NULL, NULL,
958 NULL, NULL, NULL, NULL,
959 NULL, NULL, NULL, NULL,
961 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
962 .tcg_features = TCG_SVM_FEATURES,
966 "fsgsbase", "tsc-adjust", NULL, "bmi1",
967 "hle", "avx2", NULL, "smep",
968 "bmi2", "erms", "invpcid", "rtm",
969 NULL, NULL, "mpx", NULL,
970 "avx512f", "avx512dq", "rdseed", "adx",
971 "smap", "avx512ifma", "pcommit", "clflushopt",
972 "clwb", "intel-pt", "avx512pf", "avx512er",
973 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
976 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
978 .tcg_features = TCG_7_0_EBX_FEATURES,
982 NULL, "avx512vbmi", "umip", "pku",
983 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
984 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
985 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
986 "la57", NULL, NULL, NULL,
987 NULL, NULL, "rdpid", NULL,
988 NULL, "cldemote", NULL, NULL,
989 NULL, NULL, NULL, NULL,
992 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
994 .tcg_features = TCG_7_0_ECX_FEATURES,
998 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
999 NULL, NULL, NULL, NULL,
1000 NULL, NULL, NULL, NULL,
1001 NULL, NULL, NULL, NULL,
1002 NULL, NULL, NULL, NULL,
1003 NULL, NULL, NULL, NULL,
1004 NULL, NULL, "spec-ctrl", NULL,
1005 NULL, NULL, NULL, "ssbd",
1008 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1010 .tcg_features = TCG_7_0_EDX_FEATURES,
1012 [FEAT_8000_0007_EDX] = {
1014 NULL, NULL, NULL, NULL,
1015 NULL, NULL, NULL, NULL,
1016 "invtsc", NULL, NULL, NULL,
1017 NULL, NULL, NULL, NULL,
1018 NULL, NULL, NULL, NULL,
1019 NULL, NULL, NULL, NULL,
1020 NULL, NULL, NULL, NULL,
1021 NULL, NULL, NULL, NULL,
1023 .cpuid_eax = 0x80000007,
1025 .tcg_features = TCG_APM_FEATURES,
1026 .unmigratable_flags = CPUID_APM_INVTSC,
1028 [FEAT_8000_0008_EBX] = {
1030 NULL, NULL, NULL, NULL,
1031 NULL, NULL, NULL, NULL,
1032 NULL, NULL, NULL, NULL,
1033 "ibpb", NULL, NULL, NULL,
1034 NULL, NULL, NULL, NULL,
1035 NULL, NULL, NULL, NULL,
1036 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1037 NULL, NULL, NULL, NULL,
1039 .cpuid_eax = 0x80000008,
1042 .unmigratable_flags = 0,
1046 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1047 NULL, NULL, NULL, NULL,
1048 NULL, NULL, NULL, NULL,
1049 NULL, NULL, NULL, NULL,
1050 NULL, NULL, NULL, NULL,
1051 NULL, NULL, NULL, NULL,
1052 NULL, NULL, NULL, NULL,
1053 NULL, NULL, NULL, NULL,
1056 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
1058 .tcg_features = TCG_XSAVE_FEATURES,
1062 NULL, NULL, "arat", NULL,
1063 NULL, NULL, NULL, NULL,
1064 NULL, NULL, NULL, NULL,
1065 NULL, NULL, NULL, NULL,
1066 NULL, NULL, NULL, NULL,
1067 NULL, NULL, NULL, NULL,
1068 NULL, NULL, NULL, NULL,
1069 NULL, NULL, NULL, NULL,
1071 .cpuid_eax = 6, .cpuid_reg = R_EAX,
1072 .tcg_features = TCG_6_EAX_FEATURES,
1074 [FEAT_XSAVE_COMP_LO] = {
1076 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1078 .tcg_features = ~0U,
1079 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1080 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1081 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1084 [FEAT_XSAVE_COMP_HI] = {
1086 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1088 .tcg_features = ~0U,
1092 typedef struct X86RegisterInfo32 {
1093 /* Name of register */
1095 /* QAPI enum value for the register */
1096 X86CPURegister32 qapi_enum;
1097 } X86RegisterInfo32;
1099 #define REGISTER(reg) \
1100 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1101 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
1113 typedef struct ExtSaveArea {
1114 uint32_t feature, bits;
1115 uint32_t offset, size;
1118 static const ExtSaveArea x86_ext_save_areas[] = {
1120 /* x87 FP state component is always enabled if XSAVE is supported */
1121 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1122 /* x87 state is in the legacy region of the XSAVE area */
1124 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1126 [XSTATE_SSE_BIT] = {
1127 /* SSE state component is always enabled if XSAVE is supported */
1128 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1129 /* SSE state is in the legacy region of the XSAVE area */
1131 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1134 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1135 .offset = offsetof(X86XSaveArea, avx_state),
1136 .size = sizeof(XSaveAVX) },
1137 [XSTATE_BNDREGS_BIT] =
1138 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1139 .offset = offsetof(X86XSaveArea, bndreg_state),
1140 .size = sizeof(XSaveBNDREG) },
1141 [XSTATE_BNDCSR_BIT] =
1142 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1143 .offset = offsetof(X86XSaveArea, bndcsr_state),
1144 .size = sizeof(XSaveBNDCSR) },
1145 [XSTATE_OPMASK_BIT] =
1146 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1147 .offset = offsetof(X86XSaveArea, opmask_state),
1148 .size = sizeof(XSaveOpmask) },
1149 [XSTATE_ZMM_Hi256_BIT] =
1150 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1151 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1152 .size = sizeof(XSaveZMM_Hi256) },
1153 [XSTATE_Hi16_ZMM_BIT] =
1154 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1155 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1156 .size = sizeof(XSaveHi16_ZMM) },
1158 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1159 .offset = offsetof(X86XSaveArea, pkru_state),
1160 .size = sizeof(XSavePKRU) },
1163 static uint32_t xsave_area_size(uint64_t mask)
1168 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1169 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1170 if ((mask >> i) & 1) {
1171 ret = MAX(ret, esa->offset + esa->size);
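/*
 * Example (assuming the legacy components above start at offset 0):
 * xsave_area_size(XSTATE_FP_MASK | XSTATE_SSE_MASK) covers only the x87 and
 * SSE components, so it returns
 * sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader).
 */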
1177 static inline bool accel_uses_host_cpuid(void)
1179 return kvm_enabled() || hvf_enabled();
1182 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1184 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1185 cpu->env.features[FEAT_XSAVE_COMP_LO];
1188 const char *get_register_name_32(unsigned int reg)
1190 if (reg >= CPU_NB_REGS32) {
1193 return x86_reg_info_32[reg].name;
1197 * Returns the set of feature flags that are supported and migratable by
1198 * QEMU, for a given FeatureWord.
1200 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1202 FeatureWordInfo *wi = &feature_word_info[w];
1206 for (i = 0; i < 32; i++) {
1207 uint32_t f = 1U << i;
1209 /* If the feature name is known, it is implicitly considered migratable,
1210 * unless it is explicitly set in unmigratable_flags */
1211 if ((wi->migratable_flags & f) ||
1212 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1219 void host_cpuid(uint32_t function, uint32_t count,
1220 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1225 asm volatile("cpuid"
1226 : "=a"(vec[0]), "=b"(vec[1]),
1227 "=c"(vec[2]), "=d"(vec[3])
1228 : "0"(function), "c"(count) : "cc");
1229 #elif defined(__i386__)
1230 asm volatile("pusha \n\t"
1232 "mov %%eax, 0(%2) \n\t"
1233 "mov %%ebx, 4(%2) \n\t"
1234 "mov %%ecx, 8(%2) \n\t"
1235 "mov %%edx, 12(%2) \n\t"
1237 : : "a"(function), "c"(count), "S"(vec)
1253 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1255 uint32_t eax, ebx, ecx, edx;
1257 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1258 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1260 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1262 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1265 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1268 *stepping = eax & 0x0F;
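/*
 * Worked example (a hypothetical signature value): EAX = 0x000506e3 decodes
 * with the formulas above to family = 0x6 + 0x0 = 6,
 * model = 0xe | 0x50 = 0x5e, stepping = 3.
 */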
1272 /* CPU class name definitions: */
1274 /* Return the type name for a given CPU model name.
1275 * The caller is responsible for freeing the returned string.
1277 static char *x86_cpu_type_name(const char *model_name)
1279 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
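/*
 * Usage sketch (the exact suffix comes from X86_CPU_TYPE_NAME(), assumed here
 * to append the usual "-<arch>-cpu" type suffix): x86_cpu_type_name("qemu64")
 * would produce something like "qemu64-x86_64-cpu" on an x86_64 target, and
 * x86_cpu_class_get_model_name() below strips that suffix off again.
 */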
1282 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1285 char *typename = x86_cpu_type_name(cpu_model);
1286 oc = object_class_by_name(typename);
1291 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1293 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1294 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1295 return g_strndup(class_name,
1296 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1299 struct X86CPUDefinition {
1303 /* vendor is a zero-terminated, 12-character ASCII string */
1304 char vendor[CPUID_VENDOR_SZ + 1];
1308 FeatureWordArray features;
1309 const char *model_id;
1310 CPUCaches *cache_info;
1313 static CPUCaches epyc_cache_info = {
1314 .l1d_cache = &(CPUCacheInfo) {
1324 .no_invd_sharing = true,
1326 .l1i_cache = &(CPUCacheInfo) {
1336 .no_invd_sharing = true,
1338 .l2_cache = &(CPUCacheInfo) {
1339 .type = UNIFIED_CACHE,
1348 .l3_cache = &(CPUCacheInfo) {
1349 .type = UNIFIED_CACHE,
1353 .associativity = 16,
1359 .complex_indexing = true,
1363 static X86CPUDefinition builtin_x86_defs[] = {
1367 .vendor = CPUID_VENDOR_AMD,
1371 .features[FEAT_1_EDX] =
1373 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1375 .features[FEAT_1_ECX] =
1376 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1377 .features[FEAT_8000_0001_EDX] =
1378 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1379 .features[FEAT_8000_0001_ECX] =
1380 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1381 .xlevel = 0x8000000A,
1382 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1387 .vendor = CPUID_VENDOR_AMD,
1391 /* Missing: CPUID_HT */
1392 .features[FEAT_1_EDX] =
1394 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1395 CPUID_PSE36 | CPUID_VME,
1396 .features[FEAT_1_ECX] =
1397 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1399 .features[FEAT_8000_0001_EDX] =
1400 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1401 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1402 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1403 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1405 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1406 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1407 .features[FEAT_8000_0001_ECX] =
1408 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1409 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1410 /* Missing: CPUID_SVM_LBRV */
1411 .features[FEAT_SVM] =
1413 .xlevel = 0x8000001A,
1414 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1419 .vendor = CPUID_VENDOR_INTEL,
1423 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1424 .features[FEAT_1_EDX] =
1426 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1427 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1428 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1429 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1430 .features[FEAT_1_ECX] =
1431 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1433 .features[FEAT_8000_0001_EDX] =
1434 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1435 .features[FEAT_8000_0001_ECX] =
1437 .xlevel = 0x80000008,
1438 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1443 .vendor = CPUID_VENDOR_INTEL,
1447 /* Missing: CPUID_HT */
1448 .features[FEAT_1_EDX] =
1449 PPRO_FEATURES | CPUID_VME |
1450 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1452 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1453 .features[FEAT_1_ECX] =
1454 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1455 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1456 .features[FEAT_8000_0001_EDX] =
1457 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1458 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1459 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1460 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1461 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1462 .features[FEAT_8000_0001_ECX] =
1464 .xlevel = 0x80000008,
1465 .model_id = "Common KVM processor"
1470 .vendor = CPUID_VENDOR_INTEL,
1474 .features[FEAT_1_EDX] =
1476 .features[FEAT_1_ECX] =
1478 .xlevel = 0x80000004,
1479 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1484 .vendor = CPUID_VENDOR_INTEL,
1488 .features[FEAT_1_EDX] =
1489 PPRO_FEATURES | CPUID_VME |
1490 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1491 .features[FEAT_1_ECX] =
1493 .features[FEAT_8000_0001_ECX] =
1495 .xlevel = 0x80000008,
1496 .model_id = "Common 32-bit KVM processor"
1501 .vendor = CPUID_VENDOR_INTEL,
1505 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1506 .features[FEAT_1_EDX] =
1507 PPRO_FEATURES | CPUID_VME |
1508 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1510 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1511 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1512 .features[FEAT_1_ECX] =
1513 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1514 .features[FEAT_8000_0001_EDX] =
1516 .xlevel = 0x80000008,
1517 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1522 .vendor = CPUID_VENDOR_INTEL,
1526 .features[FEAT_1_EDX] =
1534 .vendor = CPUID_VENDOR_INTEL,
1538 .features[FEAT_1_EDX] =
1546 .vendor = CPUID_VENDOR_INTEL,
1550 .features[FEAT_1_EDX] =
1558 .vendor = CPUID_VENDOR_INTEL,
1562 .features[FEAT_1_EDX] =
1570 .vendor = CPUID_VENDOR_AMD,
1574 .features[FEAT_1_EDX] =
1575 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1577 .features[FEAT_8000_0001_EDX] =
1578 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1579 .xlevel = 0x80000008,
1580 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1585 .vendor = CPUID_VENDOR_INTEL,
1589 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1590 .features[FEAT_1_EDX] =
1592 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1593 CPUID_ACPI | CPUID_SS,
1594 /* Some CPUs lack CPUID_SEP */
1595 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1597 .features[FEAT_1_ECX] =
1598 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1600 .features[FEAT_8000_0001_EDX] =
1602 .features[FEAT_8000_0001_ECX] =
1604 .xlevel = 0x80000008,
1605 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1610 .vendor = CPUID_VENDOR_INTEL,
1614 .features[FEAT_1_EDX] =
1615 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1616 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1617 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1618 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1619 CPUID_DE | CPUID_FP87,
1620 .features[FEAT_1_ECX] =
1621 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1622 .features[FEAT_8000_0001_EDX] =
1623 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1624 .features[FEAT_8000_0001_ECX] =
1626 .xlevel = 0x80000008,
1627 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1632 .vendor = CPUID_VENDOR_INTEL,
1636 .features[FEAT_1_EDX] =
1637 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1638 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1639 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1640 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1641 CPUID_DE | CPUID_FP87,
1642 .features[FEAT_1_ECX] =
1643 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1645 .features[FEAT_8000_0001_EDX] =
1646 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1647 .features[FEAT_8000_0001_ECX] =
1649 .xlevel = 0x80000008,
1650 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1655 .vendor = CPUID_VENDOR_INTEL,
1659 .features[FEAT_1_EDX] =
1660 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1661 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1662 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1663 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1664 CPUID_DE | CPUID_FP87,
1665 .features[FEAT_1_ECX] =
1666 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1667 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1668 .features[FEAT_8000_0001_EDX] =
1669 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1670 .features[FEAT_8000_0001_ECX] =
1672 .xlevel = 0x80000008,
1673 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1676 .name = "Nehalem-IBRS",
1678 .vendor = CPUID_VENDOR_INTEL,
1682 .features[FEAT_1_EDX] =
1683 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1684 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1685 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1686 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1687 CPUID_DE | CPUID_FP87,
1688 .features[FEAT_1_ECX] =
1689 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1690 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1691 .features[FEAT_7_0_EDX] =
1692 CPUID_7_0_EDX_SPEC_CTRL,
1693 .features[FEAT_8000_0001_EDX] =
1694 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1695 .features[FEAT_8000_0001_ECX] =
1697 .xlevel = 0x80000008,
1698 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1703 .vendor = CPUID_VENDOR_INTEL,
1707 .features[FEAT_1_EDX] =
1708 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1709 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1710 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1711 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1712 CPUID_DE | CPUID_FP87,
1713 .features[FEAT_1_ECX] =
1714 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1715 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1716 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1717 .features[FEAT_8000_0001_EDX] =
1718 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1719 .features[FEAT_8000_0001_ECX] =
1721 .features[FEAT_6_EAX] =
1723 .xlevel = 0x80000008,
1724 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1727 .name = "Westmere-IBRS",
1729 .vendor = CPUID_VENDOR_INTEL,
1733 .features[FEAT_1_EDX] =
1734 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1735 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1736 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1737 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1738 CPUID_DE | CPUID_FP87,
1739 .features[FEAT_1_ECX] =
1740 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1741 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1742 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1743 .features[FEAT_8000_0001_EDX] =
1744 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1745 .features[FEAT_8000_0001_ECX] =
1747 .features[FEAT_7_0_EDX] =
1748 CPUID_7_0_EDX_SPEC_CTRL,
1749 .features[FEAT_6_EAX] =
1751 .xlevel = 0x80000008,
1752 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1755 .name = "SandyBridge",
1757 .vendor = CPUID_VENDOR_INTEL,
1761 .features[FEAT_1_EDX] =
1762 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1763 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1764 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1765 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1766 CPUID_DE | CPUID_FP87,
1767 .features[FEAT_1_ECX] =
1768 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1769 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1770 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1771 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1773 .features[FEAT_8000_0001_EDX] =
1774 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1776 .features[FEAT_8000_0001_ECX] =
1778 .features[FEAT_XSAVE] =
1779 CPUID_XSAVE_XSAVEOPT,
1780 .features[FEAT_6_EAX] =
1782 .xlevel = 0x80000008,
1783 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1786 .name = "SandyBridge-IBRS",
1788 .vendor = CPUID_VENDOR_INTEL,
1792 .features[FEAT_1_EDX] =
1793 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1794 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1795 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1796 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1797 CPUID_DE | CPUID_FP87,
1798 .features[FEAT_1_ECX] =
1799 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1800 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1801 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1802 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1804 .features[FEAT_8000_0001_EDX] =
1805 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1807 .features[FEAT_8000_0001_ECX] =
1809 .features[FEAT_7_0_EDX] =
1810 CPUID_7_0_EDX_SPEC_CTRL,
1811 .features[FEAT_XSAVE] =
1812 CPUID_XSAVE_XSAVEOPT,
1813 .features[FEAT_6_EAX] =
1815 .xlevel = 0x80000008,
1816 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1819 .name = "IvyBridge",
1821 .vendor = CPUID_VENDOR_INTEL,
1825 .features[FEAT_1_EDX] =
1826 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1827 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1828 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1829 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1830 CPUID_DE | CPUID_FP87,
1831 .features[FEAT_1_ECX] =
1832 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1833 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1834 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1835 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1836 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1837 .features[FEAT_7_0_EBX] =
1838 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1840 .features[FEAT_8000_0001_EDX] =
1841 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1843 .features[FEAT_8000_0001_ECX] =
1845 .features[FEAT_XSAVE] =
1846 CPUID_XSAVE_XSAVEOPT,
1847 .features[FEAT_6_EAX] =
1849 .xlevel = 0x80000008,
1850 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1853 .name = "IvyBridge-IBRS",
1855 .vendor = CPUID_VENDOR_INTEL,
1859 .features[FEAT_1_EDX] =
1860 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1861 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1862 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1863 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1864 CPUID_DE | CPUID_FP87,
1865 .features[FEAT_1_ECX] =
1866 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1867 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1868 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1869 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1870 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1871 .features[FEAT_7_0_EBX] =
1872 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1874 .features[FEAT_8000_0001_EDX] =
1875 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1877 .features[FEAT_8000_0001_ECX] =
1879 .features[FEAT_7_0_EDX] =
1880 CPUID_7_0_EDX_SPEC_CTRL,
1881 .features[FEAT_XSAVE] =
1882 CPUID_XSAVE_XSAVEOPT,
1883 .features[FEAT_6_EAX] =
1885 .xlevel = 0x80000008,
1886 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1889 .name = "Haswell-noTSX",
1891 .vendor = CPUID_VENDOR_INTEL,
1895 .features[FEAT_1_EDX] =
1896 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1897 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1898 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1899 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1900 CPUID_DE | CPUID_FP87,
1901 .features[FEAT_1_ECX] =
1902 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1903 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1904 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1905 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1906 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1907 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1908 .features[FEAT_8000_0001_EDX] =
1909 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1911 .features[FEAT_8000_0001_ECX] =
1912 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1913 .features[FEAT_7_0_EBX] =
1914 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1915 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1916 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1917 .features[FEAT_XSAVE] =
1918 CPUID_XSAVE_XSAVEOPT,
1919 .features[FEAT_6_EAX] =
1921 .xlevel = 0x80000008,
1922 .model_id = "Intel Core Processor (Haswell, no TSX)",
1925 .name = "Haswell-noTSX-IBRS",
1927 .vendor = CPUID_VENDOR_INTEL,
1931 .features[FEAT_1_EDX] =
1932 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1933 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1934 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1935 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1936 CPUID_DE | CPUID_FP87,
1937 .features[FEAT_1_ECX] =
1938 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1939 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1940 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1941 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1942 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1943 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1944 .features[FEAT_8000_0001_EDX] =
1945 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1947 .features[FEAT_8000_0001_ECX] =
1948 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1949 .features[FEAT_7_0_EDX] =
1950 CPUID_7_0_EDX_SPEC_CTRL,
1951 .features[FEAT_7_0_EBX] =
1952 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1953 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1954 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1955 .features[FEAT_XSAVE] =
1956 CPUID_XSAVE_XSAVEOPT,
1957 .features[FEAT_6_EAX] =
1959 .xlevel = 0x80000008,
1960 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1965 .vendor = CPUID_VENDOR_INTEL,
1969 .features[FEAT_1_EDX] =
1970 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1971 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1972 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1973 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1974 CPUID_DE | CPUID_FP87,
1975 .features[FEAT_1_ECX] =
1976 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1977 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1978 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1979 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1980 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1981 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1982 .features[FEAT_8000_0001_EDX] =
1983 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1985 .features[FEAT_8000_0001_ECX] =
1986 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1987 .features[FEAT_7_0_EBX] =
1988 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1989 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1990 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1991 CPUID_7_0_EBX_RTM,
1992 .features[FEAT_XSAVE] =
1993 CPUID_XSAVE_XSAVEOPT,
1994 .features[FEAT_6_EAX] =
1996 .xlevel = 0x80000008,
1997 .model_id = "Intel Core Processor (Haswell)",
2000 .name = "Haswell-IBRS",
2002 .vendor = CPUID_VENDOR_INTEL,
2006 .features[FEAT_1_EDX] =
2007 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2008 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2009 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2010 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2011 CPUID_DE | CPUID_FP87,
2012 .features[FEAT_1_ECX] =
2013 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2014 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2015 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2016 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2017 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2018 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2019 .features[FEAT_8000_0001_EDX] =
2020 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2021 CPUID_EXT2_SYSCALL,
2022 .features[FEAT_8000_0001_ECX] =
2023 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2024 .features[FEAT_7_0_EDX] =
2025 CPUID_7_0_EDX_SPEC_CTRL,
2026 .features[FEAT_7_0_EBX] =
2027 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2028 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2029 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2030 CPUID_7_0_EBX_RTM,
2031 .features[FEAT_XSAVE] =
2032 CPUID_XSAVE_XSAVEOPT,
2033 .features[FEAT_6_EAX] =
2035 .xlevel = 0x80000008,
2036 .model_id = "Intel Core Processor (Haswell, IBRS)",
2039 .name = "Broadwell-noTSX",
2041 .vendor = CPUID_VENDOR_INTEL,
2045 .features[FEAT_1_EDX] =
2046 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2047 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2048 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2049 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2050 CPUID_DE | CPUID_FP87,
2051 .features[FEAT_1_ECX] =
2052 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2053 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2054 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2055 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2056 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2057 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2058 .features[FEAT_8000_0001_EDX] =
2059 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2060 CPUID_EXT2_SYSCALL,
2061 .features[FEAT_8000_0001_ECX] =
2062 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2063 .features[FEAT_7_0_EBX] =
2064 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2065 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2066 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2067 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2068 CPUID_7_0_EBX_SMAP,
2069 .features[FEAT_XSAVE] =
2070 CPUID_XSAVE_XSAVEOPT,
2071 .features[FEAT_6_EAX] =
2073 .xlevel = 0x80000008,
2074 .model_id = "Intel Core Processor (Broadwell, no TSX)",
2077 .name = "Broadwell-noTSX-IBRS",
2079 .vendor = CPUID_VENDOR_INTEL,
2083 .features[FEAT_1_EDX] =
2084 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2085 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2086 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2087 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2088 CPUID_DE | CPUID_FP87,
2089 .features[FEAT_1_ECX] =
2090 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2091 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2092 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2093 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2094 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2095 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2096 .features[FEAT_8000_0001_EDX] =
2097 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2098 CPUID_EXT2_SYSCALL,
2099 .features[FEAT_8000_0001_ECX] =
2100 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2101 .features[FEAT_7_0_EDX] =
2102 CPUID_7_0_EDX_SPEC_CTRL,
2103 .features[FEAT_7_0_EBX] =
2104 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2105 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2106 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2107 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2108 CPUID_7_0_EBX_SMAP,
2109 .features[FEAT_XSAVE] =
2110 CPUID_XSAVE_XSAVEOPT,
2111 .features[FEAT_6_EAX] =
2113 .xlevel = 0x80000008,
2114 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
2117 .name = "Broadwell",
2119 .vendor = CPUID_VENDOR_INTEL,
2123 .features[FEAT_1_EDX] =
2124 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2125 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2126 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2127 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2128 CPUID_DE | CPUID_FP87,
2129 .features[FEAT_1_ECX] =
2130 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2131 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2132 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2133 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2134 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2135 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2136 .features[FEAT_8000_0001_EDX] =
2137 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2138 CPUID_EXT2_SYSCALL,
2139 .features[FEAT_8000_0001_ECX] =
2140 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2141 .features[FEAT_7_0_EBX] =
2142 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2143 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2144 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2145 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2146 CPUID_7_0_EBX_SMAP,
2147 .features[FEAT_XSAVE] =
2148 CPUID_XSAVE_XSAVEOPT,
2149 .features[FEAT_6_EAX] =
2151 .xlevel = 0x80000008,
2152 .model_id = "Intel Core Processor (Broadwell)",
2155 .name = "Broadwell-IBRS",
2157 .vendor = CPUID_VENDOR_INTEL,
2161 .features[FEAT_1_EDX] =
2162 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2163 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2164 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2165 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2166 CPUID_DE | CPUID_FP87,
2167 .features[FEAT_1_ECX] =
2168 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2169 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2170 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2171 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2172 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2173 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2174 .features[FEAT_8000_0001_EDX] =
2175 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2176 CPUID_EXT2_SYSCALL,
2177 .features[FEAT_8000_0001_ECX] =
2178 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2179 .features[FEAT_7_0_EDX] =
2180 CPUID_7_0_EDX_SPEC_CTRL,
2181 .features[FEAT_7_0_EBX] =
2182 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2183 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2184 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2185 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2186 CPUID_7_0_EBX_SMAP,
2187 .features[FEAT_XSAVE] =
2188 CPUID_XSAVE_XSAVEOPT,
2189 .features[FEAT_6_EAX] =
2191 .xlevel = 0x80000008,
2192 .model_id = "Intel Core Processor (Broadwell, IBRS)",
2195 .name = "Skylake-Client",
2197 .vendor = CPUID_VENDOR_INTEL,
2201 .features[FEAT_1_EDX] =
2202 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2203 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2204 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2205 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2206 CPUID_DE | CPUID_FP87,
2207 .features[FEAT_1_ECX] =
2208 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2209 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2210 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2211 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2212 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2213 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2214 .features[FEAT_8000_0001_EDX] =
2215 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2216 CPUID_EXT2_SYSCALL,
2217 .features[FEAT_8000_0001_ECX] =
2218 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2219 .features[FEAT_7_0_EBX] =
2220 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2221 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2222 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2223 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2224 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2225 /* Missing: XSAVES (not supported by some Linux versions,
2226 * including v4.1 to v4.12).
2227 * KVM doesn't yet expose any XSAVES state save component,
2228 * and the only one defined in Skylake (processor tracing)
2229 * probably will block migration anyway.
2230 */
2231 .features[FEAT_XSAVE] =
2232 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2233 CPUID_XSAVE_XGETBV1,
2234 .features[FEAT_6_EAX] =
2236 .xlevel = 0x80000008,
2237 .model_id = "Intel Core Processor (Skylake)",
2240 .name = "Skylake-Client-IBRS",
2242 .vendor = CPUID_VENDOR_INTEL,
2246 .features[FEAT_1_EDX] =
2247 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2248 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2249 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2250 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2251 CPUID_DE | CPUID_FP87,
2252 .features[FEAT_1_ECX] =
2253 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2254 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2255 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2256 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2257 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2258 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2259 .features[FEAT_8000_0001_EDX] =
2260 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2261 CPUID_EXT2_SYSCALL,
2262 .features[FEAT_8000_0001_ECX] =
2263 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2264 .features[FEAT_7_0_EDX] =
2265 CPUID_7_0_EDX_SPEC_CTRL,
2266 .features[FEAT_7_0_EBX] =
2267 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2268 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2269 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2270 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2271 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2272 /* Missing: XSAVES (not supported by some Linux versions,
2273 * including v4.1 to v4.12).
2274 * KVM doesn't yet expose any XSAVES state save component,
2275 * and the only one defined in Skylake (processor tracing)
2276 * probably will block migration anyway.
2277 */
2278 .features[FEAT_XSAVE] =
2279 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2280 CPUID_XSAVE_XGETBV1,
2281 .features[FEAT_6_EAX] =
2283 .xlevel = 0x80000008,
2284 .model_id = "Intel Core Processor (Skylake, IBRS)",
2287 .name = "Skylake-Server",
2289 .vendor = CPUID_VENDOR_INTEL,
2293 .features[FEAT_1_EDX] =
2294 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2295 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2296 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2297 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2298 CPUID_DE | CPUID_FP87,
2299 .features[FEAT_1_ECX] =
2300 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2301 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2302 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2303 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2304 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2305 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2306 .features[FEAT_8000_0001_EDX] =
2307 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2308 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2309 .features[FEAT_8000_0001_ECX] =
2310 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2311 .features[FEAT_7_0_EBX] =
2312 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2313 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2314 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2315 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2316 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2317 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2318 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2319 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2320 /* Missing: XSAVES (not supported by some Linux versions,
2321 * including v4.1 to v4.12).
2322 * KVM doesn't yet expose any XSAVES state save component,
2323 * and the only one defined in Skylake (processor tracing)
2324 * probably will block migration anyway.
2325 */
2326 .features[FEAT_XSAVE] =
2327 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2328 CPUID_XSAVE_XGETBV1,
2329 .features[FEAT_6_EAX] =
2331 .xlevel = 0x80000008,
2332 .model_id = "Intel Xeon Processor (Skylake)",
2335 .name = "Skylake-Server-IBRS",
2337 .vendor = CPUID_VENDOR_INTEL,
2341 .features[FEAT_1_EDX] =
2342 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2343 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2344 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2345 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2346 CPUID_DE | CPUID_FP87,
2347 .features[FEAT_1_ECX] =
2348 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2349 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2350 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2351 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2352 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2353 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2354 .features[FEAT_8000_0001_EDX] =
2355 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2356 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2357 .features[FEAT_8000_0001_ECX] =
2358 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2359 .features[FEAT_7_0_EDX] =
2360 CPUID_7_0_EDX_SPEC_CTRL,
2361 .features[FEAT_7_0_EBX] =
2362 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2363 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2364 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2365 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2366 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2367 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2368 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2369 CPUID_7_0_EBX_AVX512VL,
2370 /* Missing: XSAVES (not supported by some Linux versions,
2371 * including v4.1 to v4.12).
2372 * KVM doesn't yet expose any XSAVES state save component,
2373 * and the only one defined in Skylake (processor tracing)
2374 * probably will block migration anyway.
2375 */
2376 .features[FEAT_XSAVE] =
2377 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2378 CPUID_XSAVE_XGETBV1,
2379 .features[FEAT_6_EAX] =
2381 .xlevel = 0x80000008,
2382 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2385 .name = "KnightsMill",
2387 .vendor = CPUID_VENDOR_INTEL,
2391 .features[FEAT_1_EDX] =
2392 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2393 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2394 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2395 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2396 CPUID_PSE | CPUID_DE | CPUID_FP87,
2397 .features[FEAT_1_ECX] =
2398 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2399 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2400 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2401 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2402 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2403 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2404 .features[FEAT_8000_0001_EDX] =
2405 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2406 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2407 .features[FEAT_8000_0001_ECX] =
2408 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2409 .features[FEAT_7_0_EBX] =
2410 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2411 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2412 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2413 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2414 CPUID_7_0_EBX_AVX512ER,
2415 .features[FEAT_7_0_ECX] =
2416 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2417 .features[FEAT_7_0_EDX] =
2418 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2419 .features[FEAT_XSAVE] =
2420 CPUID_XSAVE_XSAVEOPT,
2421 .features[FEAT_6_EAX] =
2423 .xlevel = 0x80000008,
2424 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2427 .name = "Opteron_G1",
2429 .vendor = CPUID_VENDOR_AMD,
2433 .features[FEAT_1_EDX] =
2434 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2435 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2436 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2437 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2438 CPUID_DE | CPUID_FP87,
2439 .features[FEAT_1_ECX] =
2441 .features[FEAT_8000_0001_EDX] =
2442 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2443 .xlevel = 0x80000008,
2444 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2447 .name = "Opteron_G2",
2449 .vendor = CPUID_VENDOR_AMD,
2453 .features[FEAT_1_EDX] =
2454 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2455 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2456 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2457 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2458 CPUID_DE | CPUID_FP87,
2459 .features[FEAT_1_ECX] =
2460 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2461 /* Missing: CPUID_EXT2_RDTSCP */
2462 .features[FEAT_8000_0001_EDX] =
2463 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2464 .features[FEAT_8000_0001_ECX] =
2465 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2466 .xlevel = 0x80000008,
2467 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2470 .name = "Opteron_G3",
2472 .vendor = CPUID_VENDOR_AMD,
2476 .features[FEAT_1_EDX] =
2477 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2478 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2479 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2480 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2481 CPUID_DE | CPUID_FP87,
2482 .features[FEAT_1_ECX] =
2483 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2484 CPUID_EXT_SSE3,
2485 /* Missing: CPUID_EXT2_RDTSCP */
2486 .features[FEAT_8000_0001_EDX] =
2487 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2488 .features[FEAT_8000_0001_ECX] =
2489 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2490 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2491 .xlevel = 0x80000008,
2492 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2495 .name = "Opteron_G4",
2497 .vendor = CPUID_VENDOR_AMD,
2501 .features[FEAT_1_EDX] =
2502 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2503 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2504 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2505 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2506 CPUID_DE | CPUID_FP87,
2507 .features[FEAT_1_ECX] =
2508 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2509 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2510 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2511 CPUID_EXT_SSE3,
2512 /* Missing: CPUID_EXT2_RDTSCP */
2513 .features[FEAT_8000_0001_EDX] =
2514 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2515 CPUID_EXT2_SYSCALL,
2516 .features[FEAT_8000_0001_ECX] =
2517 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2518 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2519 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2520 CPUID_EXT3_LAHF_LM,
2522 .xlevel = 0x8000001A,
2523 .model_id = "AMD Opteron 62xx class CPU",
2526 .name = "Opteron_G5",
2528 .vendor = CPUID_VENDOR_AMD,
2532 .features[FEAT_1_EDX] =
2533 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2534 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2535 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2536 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2537 CPUID_DE | CPUID_FP87,
2538 .features[FEAT_1_ECX] =
2539 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2540 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2541 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2542 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2543 /* Missing: CPUID_EXT2_RDTSCP */
2544 .features[FEAT_8000_0001_EDX] =
2545 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2546 CPUID_EXT2_SYSCALL,
2547 .features[FEAT_8000_0001_ECX] =
2548 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2549 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2550 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2551 CPUID_EXT3_LAHF_LM,
2553 .xlevel = 0x8000001A,
2554 .model_id = "AMD Opteron 63xx class CPU",
2559 .vendor = CPUID_VENDOR_AMD,
2563 .features[FEAT_1_EDX] =
2564 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2565 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2566 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2567 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2568 CPUID_VME | CPUID_FP87,
2569 .features[FEAT_1_ECX] =
2570 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2571 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2572 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2573 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2574 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2575 .features[FEAT_8000_0001_EDX] =
2576 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2577 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2578 CPUID_EXT2_SYSCALL,
2579 .features[FEAT_8000_0001_ECX] =
2580 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2581 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2582 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2583 CPUID_EXT3_TOPOEXT,
2584 .features[FEAT_7_0_EBX] =
2585 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2586 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2587 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2588 CPUID_7_0_EBX_SHA_NI,
2589 /* Missing: XSAVES (not supported by some Linux versions,
2590 * including v4.1 to v4.12).
2591 * KVM doesn't yet expose any XSAVES state save component.
2592 */
2593 .features[FEAT_XSAVE] =
2594 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2595 CPUID_XSAVE_XGETBV1,
2596 .features[FEAT_6_EAX] =
2598 .xlevel = 0x8000001E,
2599 .model_id = "AMD EPYC Processor",
2600 .cache_info = &epyc_cache_info,
2603 .name = "EPYC-IBPB",
2605 .vendor = CPUID_VENDOR_AMD,
2609 .features[FEAT_1_EDX] =
2610 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2611 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2612 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2613 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2614 CPUID_VME | CPUID_FP87,
2615 .features[FEAT_1_ECX] =
2616 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2617 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2618 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2619 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2620 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2621 .features[FEAT_8000_0001_EDX] =
2622 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2623 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2624 CPUID_EXT2_SYSCALL,
2625 .features[FEAT_8000_0001_ECX] =
2626 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2627 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2628 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2629 CPUID_EXT3_TOPOEXT,
2630 .features[FEAT_8000_0008_EBX] =
2631 CPUID_8000_0008_EBX_IBPB,
2632 .features[FEAT_7_0_EBX] =
2633 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2634 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2635 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2636 CPUID_7_0_EBX_SHA_NI,
2637 /* Missing: XSAVES (not supported by some Linux versions,
2638 * including v4.1 to v4.12).
2639 * KVM doesn't yet expose any XSAVES state save component.
2640 */
2641 .features[FEAT_XSAVE] =
2642 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2643 CPUID_XSAVE_XGETBV1,
2644 .features[FEAT_6_EAX] =
2646 .xlevel = 0x8000001E,
2647 .model_id = "AMD EPYC Processor (with IBPB)",
2648 .cache_info = &epyc_cache_info,
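/*
 * Editor's note (illustrative, not part of the original source): each
 * definition in the builtin model table above becomes a selectable CPU type,
 * and individual feature flags can still be toggled per property on top of
 * it, e.g. (feature names assumed to be available in the running build):
 *
 *     qemu-system-x86_64 -cpu EPYC-IBPB
 *     qemu-system-x86_64 -cpu EPYC,+avx2,-monitor
 */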
2652 typedef struct PropValue {
2653 const char *prop, *value;
2656 /* KVM-specific features that are automatically added/removed
2657 * from all CPU models when KVM is enabled.
2658 */
2659 static PropValue kvm_default_props[] = {
2660 { "kvmclock", "on" },
2661 { "kvm-nopiodelay", "on" },
2662 { "kvm-asyncpf", "on" },
2663 { "kvm-steal-time", "on" },
2664 { "kvm-pv-eoi", "on" },
2665 { "kvmclock-stable-bit", "on" },
2668 { "monitor", "off" },
2673 /* TCG-specific defaults that override all CPU models when using TCG
2674 */
2675 static PropValue tcg_default_props[] = {
2681 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2684 for (pv = kvm_default_props; pv->prop; pv++) {
2685 if (!strcmp(pv->prop, prop)) {
2691 /* It is valid to call this function only for properties that
2692 * are already present in the kvm_default_props table.
2693 */
2697 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2698 bool migratable_only);
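/*
 * Editor's note: a minimal sketch (not from the original source) of how the
 * PropValue tables above are consumed.  x86_cpu_apply_props() walks a table
 * and feeds each { property, value } pair to the QOM property parser, and
 * x86_cpu_change_kvm_default() may patch a default before CPUs are realized:
 *
 *     x86_cpu_change_kvm_default("x2apic", "off");
 *
 * Only properties already listed in kvm_default_props may be changed this way.
 */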
2700 static bool lmce_supported(void)
2702 uint64_t mce_cap = 0;
2705 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2710 return !!(mce_cap & MCG_LMCE_P);
2713 #define CPUID_MODEL_ID_SZ 48
2716 * cpu_x86_fill_model_id:
2717 * Get CPUID model ID string from host CPU.
2719 * @str should have at least CPUID_MODEL_ID_SZ bytes
2721 * The function does NOT add a null terminator to the string
2722 * automatically.
2723 */
2724 static int cpu_x86_fill_model_id(char *str)
2726 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2729 for (i = 0; i < 3; i++) {
2730 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2731 memcpy(str + i * 16 + 0, &eax, 4);
2732 memcpy(str + i * 16 + 4, &ebx, 4);
2733 memcpy(str + i * 16 + 8, &ecx, 4);
2734 memcpy(str + i * 16 + 12, &edx, 4);
2739 static Property max_x86_cpu_properties[] = {
2740 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2741 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2742 DEFINE_PROP_END_OF_LIST()
2745 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2747 DeviceClass *dc = DEVICE_CLASS(oc);
2748 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2752 xcc->model_description =
2753 "Enables all features supported by the accelerator in the current host";
2755 dc->props = max_x86_cpu_properties;
2758 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2760 static void max_x86_cpu_initfn(Object *obj)
2762 X86CPU *cpu = X86_CPU(obj);
2763 CPUX86State *env = &cpu->env;
2764 KVMState *s = kvm_state;
2766 /* We can't fill the features array here because we don't know yet if
2767 * "migratable" is true or false.
2769 cpu->max_features = true;
2771 if (accel_uses_host_cpuid()) {
2772 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2773 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2774 int family, model, stepping;
2775 X86CPUDefinition host_cpudef = { };
2776 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2778 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2779 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2781 host_vendor_fms(vendor, &family, &model, &stepping);
2783 cpu_x86_fill_model_id(model_id);
2785 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2786 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2787 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2788 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2790 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2793 if (kvm_enabled()) {
2794 env->cpuid_min_level =
2795 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2796 env->cpuid_min_xlevel =
2797 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2798 env->cpuid_min_xlevel2 =
2799 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2801 env->cpuid_min_level =
2802 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2803 env->cpuid_min_xlevel =
2804 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2805 env->cpuid_min_xlevel2 =
2806 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2809 if (lmce_supported()) {
2810 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2813 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2814 "vendor", &error_abort);
2815 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2816 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2817 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2818 object_property_set_str(OBJECT(cpu),
2819 "QEMU TCG CPU version " QEMU_HW_VERSION,
2820 "model-id", &error_abort);
2823 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
2826 static const TypeInfo max_x86_cpu_type_info = {
2827 .name = X86_CPU_TYPE_NAME("max"),
2828 .parent = TYPE_X86_CPU,
2829 .instance_init = max_x86_cpu_initfn,
2830 .class_init = max_x86_cpu_class_init,
2833 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2834 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2836 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2838 xcc->host_cpuid_required = true;
2841 if (kvm_enabled()) {
2842 xcc->model_description =
2843 "KVM processor with all supported host features ";
2844 } else if (hvf_enabled()) {
2845 xcc->model_description =
2846 "HVF processor with all supported host features ";
2850 static const TypeInfo host_x86_cpu_type_info = {
2851 .name = X86_CPU_TYPE_NAME("host"),
2852 .parent = X86_CPU_TYPE_NAME("max"),
2853 .class_init = host_x86_cpu_class_init,
2858 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2860 FeatureWordInfo *f = &feature_word_info[w];
2863 for (i = 0; i < 32; ++i) {
2864 if ((1UL << i) & mask) {
2865 const char *reg = get_register_name_32(f->cpuid_reg);
2867 warn_report("%s doesn't support requested feature: "
2868 "CPUID.%02XH:%s%s%s [bit %d]",
2869 accel_uses_host_cpuid() ? "host" : "TCG",
2871 f->feat_names[i] ? "." : "",
2872 f->feat_names[i] ? f->feat_names[i] : "", i);
2877 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2878 const char *name, void *opaque,
2881 X86CPU *cpu = X86_CPU(obj);
2882 CPUX86State *env = &cpu->env;
2885 value = (env->cpuid_version >> 8) & 0xf;
2887 value += (env->cpuid_version >> 20) & 0xff;
2889 visit_type_int(v, name, &value, errp);
2892 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2893 const char *name, void *opaque,
2896 X86CPU *cpu = X86_CPU(obj);
2897 CPUX86State *env = &cpu->env;
2898 const int64_t min = 0;
2899 const int64_t max = 0xff + 0xf;
2900 Error *local_err = NULL;
2903 visit_type_int(v, name, &value, &local_err);
2905 error_propagate(errp, local_err);
2908 if (value < min || value > max) {
2909 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2910 name ? name : "null", value, min, max);
2914 env->cpuid_version &= ~0xff00f00;
2916 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2918 env->cpuid_version |= value << 8;
2922 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2923 const char *name, void *opaque,
2926 X86CPU *cpu = X86_CPU(obj);
2927 CPUX86State *env = &cpu->env;
2930 value = (env->cpuid_version >> 4) & 0xf;
2931 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2932 visit_type_int(v, name, &value, errp);
2935 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2936 const char *name, void *opaque,
2939 X86CPU *cpu = X86_CPU(obj);
2940 CPUX86State *env = &cpu->env;
2941 const int64_t min = 0;
2942 const int64_t max = 0xff;
2943 Error *local_err = NULL;
2946 visit_type_int(v, name, &value, &local_err);
2948 error_propagate(errp, local_err);
2951 if (value < min || value > max) {
2952 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2953 name ? name : "null", value, min, max);
2957 env->cpuid_version &= ~0xf00f0;
2958 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
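/*
 * Editor's note: worked example (illustrative).  The model number is split
 * across CPUID[1].EAX bits 7:4 (base model) and 19:16 (extended model), so
 * setting "model" to 0x4f stores (0xf << 4) | (0x4 << 16) = 0x400f0 and is
 * read back as 0xf | (0x4 << 4) = 0x4f by the getter above.
 */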
2961 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2962 const char *name, void *opaque,
2965 X86CPU *cpu = X86_CPU(obj);
2966 CPUX86State *env = &cpu->env;
2969 value = env->cpuid_version & 0xf;
2970 visit_type_int(v, name, &value, errp);
2973 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2974 const char *name, void *opaque,
2977 X86CPU *cpu = X86_CPU(obj);
2978 CPUX86State *env = &cpu->env;
2979 const int64_t min = 0;
2980 const int64_t max = 0xf;
2981 Error *local_err = NULL;
2984 visit_type_int(v, name, &value, &local_err);
2986 error_propagate(errp, local_err);
2989 if (value < min || value > max) {
2990 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2991 name ? name : "null", value, min, max);
2995 env->cpuid_version &= ~0xf;
2996 env->cpuid_version |= value & 0xf;
2999 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
3001 X86CPU *cpu = X86_CPU(obj);
3002 CPUX86State *env = &cpu->env;
3005 value = g_malloc(CPUID_VENDOR_SZ + 1);
3006 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
3007 env->cpuid_vendor3);
3011 static void x86_cpuid_set_vendor(Object *obj, const char *value,
3014 X86CPU *cpu = X86_CPU(obj);
3015 CPUX86State *env = &cpu->env;
3018 if (strlen(value) != CPUID_VENDOR_SZ) {
3019 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
3023 env->cpuid_vendor1 = 0;
3024 env->cpuid_vendor2 = 0;
3025 env->cpuid_vendor3 = 0;
3026 for (i = 0; i < 4; i++) {
3027 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3028 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3029 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
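/*
 * Editor's note: worked example (illustrative).  The 12-character vendor
 * string is packed little-endian, four bytes per register, so "GenuineIntel"
 * ends up as cpuid_vendor1 = "Genu" (EBX), cpuid_vendor2 = "ineI" (EDX) and
 * cpuid_vendor3 = "ntel" (ECX), matching the EBX/EDX/ECX order of CPUID[0].
 */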
3033 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3035 X86CPU *cpu = X86_CPU(obj);
3036 CPUX86State *env = &cpu->env;
3040 value = g_malloc(48 + 1);
3041 for (i = 0; i < 48; i++) {
3042 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3048 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3051 X86CPU *cpu = X86_CPU(obj);
3052 CPUX86State *env = &cpu->env;
3055 if (model_id == NULL) {
3058 len = strlen(model_id);
3059 memset(env->cpuid_model, 0, 48);
3060 for (i = 0; i < 48; i++) {
3064 c = (uint8_t)model_id[i];
3066 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3070 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3071 void *opaque, Error **errp)
3073 X86CPU *cpu = X86_CPU(obj);
3076 value = cpu->env.tsc_khz * 1000;
3077 visit_type_int(v, name, &value, errp);
3080 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3081 void *opaque, Error **errp)
3083 X86CPU *cpu = X86_CPU(obj);
3084 const int64_t min = 0;
3085 const int64_t max = INT64_MAX;
3086 Error *local_err = NULL;
3089 visit_type_int(v, name, &value, &local_err);
3091 error_propagate(errp, local_err);
3094 if (value < min || value > max) {
3095 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3096 name ? name : "null", value, min, max);
3100 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
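/*
 * Editor's note (illustrative): the property value is given in Hz, so
 * "tsc-frequency=2500000000" stores tsc_khz = 2500000 (2.5 GHz); any sub-kHz
 * remainder is truncated by the integer division above.
 */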
3103 /* Generic getter for "feature-words" and "filtered-features" properties */
3104 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
3105 const char *name, void *opaque,
3108 uint32_t *array = (uint32_t *)opaque;
3110 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
3111 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
3112 X86CPUFeatureWordInfoList *list = NULL;
3114 for (w = 0; w < FEATURE_WORDS; w++) {
3115 FeatureWordInfo *wi = &feature_word_info[w];
3116 X86CPUFeatureWordInfo *qwi = &word_infos[w];
3117 qwi->cpuid_input_eax = wi->cpuid_eax;
3118 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
3119 qwi->cpuid_input_ecx = wi->cpuid_ecx;
3120 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
3121 qwi->features = array[w];
3123 /* List will be in reverse order, but order shouldn't matter */
3124 list_entries[w].next = list;
3125 list_entries[w].value = &word_infos[w];
3126 list = &list_entries[w];
3129 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
3132 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3133 void *opaque, Error **errp)
3135 X86CPU *cpu = X86_CPU(obj);
3136 int64_t value = cpu->hyperv_spinlock_attempts;
3138 visit_type_int(v, name, &value, errp);
3141 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3142 void *opaque, Error **errp)
3144 const int64_t min = 0xFFF;
3145 const int64_t max = UINT_MAX;
3146 X86CPU *cpu = X86_CPU(obj);
3150 visit_type_int(v, name, &value, &err);
3152 error_propagate(errp, err);
3156 if (value < min || value > max) {
3157 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
3158 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
3159 object_get_typename(obj), name ? name : "null",
3163 cpu->hyperv_spinlock_attempts = value;
3166 static const PropertyInfo qdev_prop_spinlocks = {
3168 .get = x86_get_hv_spinlocks,
3169 .set = x86_set_hv_spinlocks,
3172 /* Convert all '_' in a feature string option name to '-', to make feature
3173 * name conform to QOM property naming rule, which uses '-' instead of '_'.
3174 */
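/*
 * Editor's note (illustrative): e.g. a legacy spelling such as "kvm_pv_eoi"
 * is rewritten in place to "kvm-pv-eoi" before being matched against QOM
 * property names.
 */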
3175 static inline void feat2prop(char *s)
3177 while ((s = strchr(s, '_'))) {
3182 /* Return the feature property name for a feature flag bit */
3183 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
3185 /* XSAVE components are automatically enabled by other features,
3186 * so return the original feature name instead
3187 */
3188 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
3189 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
3191 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
3192 x86_ext_save_areas[comp].bits) {
3193 w = x86_ext_save_areas[comp].feature;
3194 bitnr = ctz32(x86_ext_save_areas[comp].bits);
3199 assert(w < FEATURE_WORDS);
3200 return feature_word_info[w].feat_names[bitnr];
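/*
 * Editor's note (illustrative): e.g. w == FEAT_1_ECX with bitnr == 28 yields
 * "avx", while bits of the XSAVE component words are reported under the
 * feature that owns the corresponding save area instead.
 */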
3203 /* Compatibility hack to maintain legacy +-feat semantics,
3204 * where +-feat overwrites any feature set by
3205 * feat=on|off even if the latter is parsed after +-feat
3206 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
3207 */
3208 static GList *plus_features, *minus_features;
3210 static gint compare_string(gconstpointer a, gconstpointer b)
3212 return g_strcmp0(a, b);
3215 /* Parse "+feature,-feature,feature=foo" CPU feature string
3216 */
3217 static void x86_cpu_parse_featurestr(const char *typename, char *features,
3220 char *featurestr; /* Single "key=value" string being parsed */
3221 static bool cpu_globals_initialized;
3222 bool ambiguous = false;
3224 if (cpu_globals_initialized) {
3227 cpu_globals_initialized = true;
3233 for (featurestr = strtok(features, ",");
3235 featurestr = strtok(NULL, ",")) {
3237 const char *val = NULL;
3240 GlobalProperty *prop;
3242 /* Compatibility syntax: */
3243 if (featurestr[0] == '+') {
3244 plus_features = g_list_append(plus_features,
3245 g_strdup(featurestr + 1));
3247 } else if (featurestr[0] == '-') {
3248 minus_features = g_list_append(minus_features,
3249 g_strdup(featurestr + 1));
3253 eq = strchr(featurestr, '=');
3261 feat2prop(featurestr);
3264 if (g_list_find_custom(plus_features, name, compare_string)) {
3265 warn_report("Ambiguous CPU model string. "
3266 "Don't mix both \"+%s\" and \"%s=%s\"",
3270 if (g_list_find_custom(minus_features, name, compare_string)) {
3271 warn_report("Ambiguous CPU model string. "
3272 "Don't mix both \"-%s\" and \"%s=%s\"",
3278 if (!strcmp(name, "tsc-freq")) {
3282 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
3283 if (ret < 0 || tsc_freq > INT64_MAX) {
3284 error_setg(errp, "bad numerical value %s", val);
3287 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
3289 name = "tsc-frequency";
3292 prop = g_new0(typeof(*prop), 1);
3293 prop->driver = typename;
3294 prop->property = g_strdup(name);
3295 prop->value = g_strdup(val);
3296 prop->errp = &error_fatal;
3297 qdev_prop_register_global(prop);
3301 warn_report("Compatibility of ambiguous CPU model "
3302 "strings won't be kept on future QEMU versions");
3306 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3307 static int x86_cpu_filter_features(X86CPU *cpu);
3309 /* Check for missing features that may prevent the CPU class from
3310 * running using the current machine and accelerator.
3311 */
3312 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3313 strList **missing_feats)
3318 strList **next = missing_feats;
3320 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3321 strList *new = g_new0(strList, 1);
3322 new->value = g_strdup("kvm");
3323 *missing_feats = new;
3327 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3329 x86_cpu_expand_features(xc, &err);
3331 /* Errors at x86_cpu_expand_features should never happen,
3332 * but in case they do, just report the model as not
3333 * runnable at all using the "type" property.
3334 */
3335 strList *new = g_new0(strList, 1);
3336 new->value = g_strdup("type");
3341 x86_cpu_filter_features(xc);
3343 for (w = 0; w < FEATURE_WORDS; w++) {
3344 uint32_t filtered = xc->filtered_features[w];
3346 for (i = 0; i < 32; i++) {
3347 if (filtered & (1UL << i)) {
3348 strList *new = g_new0(strList, 1);
3349 new->value = g_strdup(x86_cpu_feature_name(w, i));
3356 object_unref(OBJECT(xc));
3359 /* Print all cpuid feature names in featureset
3360 */
3361 static void listflags(FILE *f, fprintf_function print, GList *features)
3366 for (tmp = features; tmp; tmp = tmp->next) {
3367 const char *name = tmp->data;
3368 if ((len + strlen(name) + 1) >= 75) {
3372 print(f, "%s%s", len == 0 ? " " : " ", name);
3373 len += strlen(name) + 1;
3378 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3379 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3381 ObjectClass *class_a = (ObjectClass *)a;
3382 ObjectClass *class_b = (ObjectClass *)b;
3383 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3384 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3385 char *name_a, *name_b;
3388 if (cc_a->ordering != cc_b->ordering) {
3389 ret = cc_a->ordering - cc_b->ordering;
3391 name_a = x86_cpu_class_get_model_name(cc_a);
3392 name_b = x86_cpu_class_get_model_name(cc_b);
3393 ret = strcmp(name_a, name_b);
3400 static GSList *get_sorted_cpu_model_list(void)
3402 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3403 list = g_slist_sort(list, x86_cpu_list_compare);
3407 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3409 ObjectClass *oc = data;
3410 X86CPUClass *cc = X86_CPU_CLASS(oc);
3411 CPUListState *s = user_data;
3412 char *name = x86_cpu_class_get_model_name(cc);
3413 const char *desc = cc->model_description;
3414 if (!desc && cc->cpu_def) {
3415 desc = cc->cpu_def->model_id;
3418 (*s->cpu_fprintf)(s->file, "x86 %-20s %-48s\n",
3423 /* list available CPU models and flags */
3424 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3429 .cpu_fprintf = cpu_fprintf,
3432 GList *names = NULL;
3434 (*cpu_fprintf)(f, "Available CPUs:\n");
3435 list = get_sorted_cpu_model_list();
3436 g_slist_foreach(list, x86_cpu_list_entry, &s);
3440 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3441 FeatureWordInfo *fw = &feature_word_info[i];
3442 for (j = 0; j < 32; j++) {
3443 if (fw->feat_names[j]) {
3444 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3449 names = g_list_sort(names, (GCompareFunc)strcmp);
3451 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
3452 listflags(f, cpu_fprintf, names);
3453 (*cpu_fprintf)(f, "\n");
3457 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3459 ObjectClass *oc = data;
3460 X86CPUClass *cc = X86_CPU_CLASS(oc);
3461 CpuDefinitionInfoList **cpu_list = user_data;
3462 CpuDefinitionInfoList *entry;
3463 CpuDefinitionInfo *info;
3465 info = g_malloc0(sizeof(*info));
3466 info->name = x86_cpu_class_get_model_name(cc);
3467 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3468 info->has_unavailable_features = true;
3469 info->q_typename = g_strdup(object_class_get_name(oc));
3470 info->migration_safe = cc->migration_safe;
3471 info->has_migration_safe = true;
3472 info->q_static = cc->static_model;
3474 entry = g_malloc0(sizeof(*entry));
3475 entry->value = info;
3476 entry->next = *cpu_list;
3480 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
3482 CpuDefinitionInfoList *cpu_list = NULL;
3483 GSList *list = get_sorted_cpu_model_list();
3484 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3489 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3490 bool migratable_only)
3492 FeatureWordInfo *wi = &feature_word_info[w];
3495 if (kvm_enabled()) {
3496 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
3499 } else if (hvf_enabled()) {
3500 r = hvf_get_supported_cpuid(wi->cpuid_eax,
3503 } else if (tcg_enabled()) {
3504 r = wi->tcg_features;
3508 if (migratable_only) {
3509 r &= x86_cpu_get_migratable_flags(w);
3514 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3518 for (w = 0; w < FEATURE_WORDS; w++) {
3519 report_unavailable_features(w, cpu->filtered_features[w]);
3523 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3526 for (pv = props; pv->prop; pv++) {
3530 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3535 /* Load data from X86CPUDefinition into a X86CPU object
3536 */
3537 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
3539 CPUX86State *env = &cpu->env;
3541 char host_vendor[CPUID_VENDOR_SZ + 1];
3544 /*NOTE: any property set by this function should be returned by
3545 * x86_cpu_static_props(), so static expansion of
3546 * query-cpu-model-expansion is always complete.
3547 */
3549 /* CPU models only set _minimum_ values for level/xlevel: */
3550 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
3551 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
3553 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
3554 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
3555 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
3556 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
3557 for (w = 0; w < FEATURE_WORDS; w++) {
3558 env->features[w] = def->features[w];
3561 /* legacy-cache defaults to 'off' if CPU model provides cache info */
3562 cpu->legacy_cache = !def->cache_info;
3564 /* Special cases not set in the X86CPUDefinition structs: */
3565 /* TODO: in-kernel irqchip for hvf */
3566 if (kvm_enabled()) {
3567 if (!kvm_irqchip_in_kernel()) {
3568 x86_cpu_change_kvm_default("x2apic", "off");
3571 x86_cpu_apply_props(cpu, kvm_default_props);
3572 } else if (tcg_enabled()) {
3573 x86_cpu_apply_props(cpu, tcg_default_props);
3576 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
3578 /* sysenter isn't supported in compatibility mode on AMD,
3579 * syscall isn't supported in compatibility mode on Intel.
3580 * Normally we advertise the actual CPU vendor, but you can
3581 * override this using the 'vendor' property if you want to use
3582 * KVM's sysenter/syscall emulation in compatibility mode and
3583 * when doing cross vendor migration
3584 */
3585 vendor = def->vendor;
3586 if (accel_uses_host_cpuid()) {
3587 uint32_t ebx = 0, ecx = 0, edx = 0;
3588 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
3589 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
3590 vendor = host_vendor;
3593 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
3597 /* Return a QDict containing keys for all properties that can be included
3598 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3599 * must be included in the dictionary.
3600 */
3601 static QDict *x86_cpu_static_props(void)
3605 static const char *props[] = {
3623 for (i = 0; props[i]; i++) {
3624 qdict_put_null(d, props[i]);
3627 for (w = 0; w < FEATURE_WORDS; w++) {
3628 FeatureWordInfo *fi = &feature_word_info[w];
3630 for (bit = 0; bit < 32; bit++) {
3631 if (!fi->feat_names[bit]) {
3634 qdict_put_null(d, fi->feat_names[bit]);
3641 /* Add an entry to @props dict, with the value for property. */
3642 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3644 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3647 qdict_put_obj(props, prop, value);
3650 /* Convert CPU model data from X86CPU object to a property dictionary
3651 * that can recreate exactly the same CPU model.
3652 */
3653 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3655 QDict *sprops = x86_cpu_static_props();
3656 const QDictEntry *e;
3658 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3659 const char *prop = qdict_entry_key(e);
3660 x86_cpu_expand_prop(cpu, props, prop);
3664 /* Convert CPU model data from X86CPU object to a property dictionary
3665 * that can recreate exactly the same CPU model, including every
3666 * writeable QOM property.
3667 */
3668 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3670 ObjectPropertyIterator iter;
3671 ObjectProperty *prop;
3673 object_property_iter_init(&iter, OBJECT(cpu));
3674 while ((prop = object_property_iter_next(&iter))) {
3675 /* skip read-only or write-only properties */
3676 if (!prop->get || !prop->set) {
3680 /* "hotplugged" is the only property that is configurable
3681 * on the command-line but will be set differently on CPUs
3682 * created using "-cpu ... -smp ..." and by CPUs created
3683 * on the fly by x86_cpu_from_model() for querying. Skip it.
3684 */
3685 if (!strcmp(prop->name, "hotplugged")) {
3688 x86_cpu_expand_prop(cpu, props, prop->name);
3692 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3694 const QDictEntry *prop;
3697 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3698 object_property_set_qobject(obj, qdict_entry_value(prop),
3699 qdict_entry_key(prop), &err);
3705 error_propagate(errp, err);
3708 /* Create X86CPU object according to model+props specification */
3709 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3715 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3717 error_setg(&err, "CPU model '%s' not found", model);
3721 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3723 object_apply_props(OBJECT(xc), props, &err);
3729 x86_cpu_expand_features(xc, &err);
3736 error_propagate(errp, err);
3737 object_unref(OBJECT(xc));
3743 CpuModelExpansionInfo *
3744 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3745 CpuModelInfo *model,
3750 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3751 QDict *props = NULL;
3752 const char *base_name;
3754 xc = x86_cpu_from_model(model->name,
3756 qobject_to(QDict, model->props) :
3762 props = qdict_new();
3765 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3766 /* Static expansion will be based on "base" only */
3768 x86_cpu_to_dict(xc, props);
3770 case CPU_MODEL_EXPANSION_TYPE_FULL:
3771 /* As we don't return every single property, full expansion needs
3772 * to keep the original model name+props, and add extra
3773 * properties on top of that.
3774 */
3775 base_name = model->name;
3776 x86_cpu_to_dict_full(xc, props);
3779 error_setg(&err, "Unsupported expansion type");
3784 props = qdict_new();
3786 x86_cpu_to_dict(xc, props);
3788 ret->model = g_new0(CpuModelInfo, 1);
3789 ret->model->name = g_strdup(base_name);
3790 ret->model->props = QOBJECT(props);
3791 ret->model->has_props = true;
3794 object_unref(OBJECT(xc));
3796 error_propagate(errp, err);
3797 qapi_free_CpuModelExpansionInfo(ret);
3803 static gchar *x86_gdb_arch_name(CPUState *cs)
3805 #ifdef TARGET_X86_64
3806 return g_strdup("i386:x86-64");
3808 return g_strdup("i386");
3812 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3814 X86CPUDefinition *cpudef = data;
3815 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3817 xcc->cpu_def = cpudef;
3818 xcc->migration_safe = true;
3821 static void x86_register_cpudef_type(X86CPUDefinition *def)
3823 char *typename = x86_cpu_type_name(def->name);
3826 .parent = TYPE_X86_CPU,
3827 .class_init = x86_cpu_cpudef_class_init,
3831 /* AMD aliases are handled at runtime based on CPUID vendor, so
3832 * they shouldn't be set on the CPU model table.
3833 */
3834 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3835 /* catch mistakes instead of silently truncating model_id when too long */
3836 assert(def->model_id && strlen(def->model_id) <= 48);
3843 #if !defined(CONFIG_USER_ONLY)
3845 void cpu_clear_apic_feature(CPUX86State *env)
3847 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3850 #endif /* !CONFIG_USER_ONLY */
3852 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3853 uint32_t *eax, uint32_t *ebx,
3854 uint32_t *ecx, uint32_t *edx)
3856 X86CPU *cpu = x86_env_get_cpu(env);
3857 CPUState *cs = CPU(cpu);
3858 uint32_t pkg_offset;
3860 uint32_t signature[3];
3862 /* Calculate & apply limits for different index ranges */
3863 if (index >= 0xC0000000) {
3864 limit = env->cpuid_xlevel2;
3865 } else if (index >= 0x80000000) {
3866 limit = env->cpuid_xlevel;
3867 } else if (index >= 0x40000000) {
3870 limit = env->cpuid_level;
3873 if (index > limit) {
3874 /* Intel documentation states that invalid EAX input will
3875 * return the same information as EAX=cpuid_level
3876 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3877 */
3878 index = env->cpuid_level;
3883 *eax = env->cpuid_level;
3884 *ebx = env->cpuid_vendor1;
3885 *edx = env->cpuid_vendor2;
3886 *ecx = env->cpuid_vendor3;
3889 *eax = env->cpuid_version;
3890 *ebx = (cpu->apic_id << 24) |
3891 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3892 *ecx = env->features[FEAT_1_ECX];
3893 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3894 *ecx |= CPUID_EXT_OSXSAVE;
3896 *edx = env->features[FEAT_1_EDX];
3897 if (cs->nr_cores * cs->nr_threads > 1) {
3898 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
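/*
 * Editor's note: worked example (illustrative).  With apic_id == 1 and
 * 2 cores x 2 threads, EBX becomes (1 << 24) | (4 << 16) | (8 << 8)
 * = 0x01040800: initial APIC ID 1, 4 addressable logical processors and a
 * CLFLUSH line size of 8 quadwords (64 bytes).
 */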
3903 /* cache info: needed for Pentium Pro compatibility */
3904 if (cpu->cache_info_passthrough) {
3905 host_cpuid(index, 0, eax, ebx, ecx, edx);
3908 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3910 if (!cpu->enable_l3_cache) {
3913 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
3915 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
3916 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
3917 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
3920 /* cache info: needed for Core compatibility */
3921 if (cpu->cache_info_passthrough) {
3922 host_cpuid(index, count, eax, ebx, ecx, edx);
3923 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3924 *eax &= ~0xFC000000;
3925 if ((*eax & 31) && cs->nr_cores > 1) {
3926 *eax |= (cs->nr_cores - 1) << 26;
3931 case 0: /* L1 dcache info */
3932 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
3934 eax, ebx, ecx, edx);
3936 case 1: /* L1 icache info */
3937 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
3939 eax, ebx, ecx, edx);
3941 case 2: /* L2 cache info */
3942 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
3943 cs->nr_threads, cs->nr_cores,
3944 eax, ebx, ecx, edx);
3946 case 3: /* L3 cache info */
3947 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3948 if (cpu->enable_l3_cache) {
3949 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
3950 (1 << pkg_offset), cs->nr_cores,
3951 eax, ebx, ecx, edx);
3955 default: /* end of info */
3956 *eax = *ebx = *ecx = *edx = 0;
3962 /* mwait info: needed for Core compatibility */
3963 *eax = 0; /* Smallest monitor-line size in bytes */
3964 *ebx = 0; /* Largest monitor-line size in bytes */
3965 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
3969 /* Thermal and Power Leaf */
3970 *eax = env->features[FEAT_6_EAX];
3976 /* Structured Extended Feature Flags Enumeration Leaf */
3978 *eax = 0; /* Maximum ECX value for sub-leaves */
3979 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3980 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
3981 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3982 *ecx |= CPUID_7_0_ECX_OSPKE;
3984 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3993 /* Direct Cache Access Information Leaf */
3994 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
4000 /* Architectural Performance Monitoring Leaf */
4001 if (kvm_enabled() && cpu->enable_pmu) {
4002 KVMState *s = cs->kvm_state;
4004 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
4005 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
4006 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
4007 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
4008 } else if (hvf_enabled() && cpu->enable_pmu) {
4009 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
4010 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
4011 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
4012 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
4021 /* Extended Topology Enumeration Leaf */
4022 if (!cpu->enable_cpuid_0xb) {
4023 *eax = *ebx = *ecx = *edx = 0;
4027 *ecx = count & 0xff;
4028 *edx = cpu->apic_id;
4032 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
4033 *ebx = cs->nr_threads;
4034 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
4037 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
4038 *ebx = cs->nr_cores * cs->nr_threads;
4039 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
4044 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
4047 assert(!(*eax & ~0x1f));
4048 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
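/* EBX is only the number of logical processors at this level; the SDM tells
 * software to use the APIC-ID shift in EAX for topology enumeration instead.
 */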
4051 /* Processor Extended State */
4056 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4061 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
4062 *eax = env->features[FEAT_XSAVE_COMP_LO];
4063 *edx = env->features[FEAT_XSAVE_COMP_HI];
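/* Sub-leaf 0 reports the supported XSAVE state components as an EAX:EDX
 * bitmap and the save-area size needed for those components in ECX.
 */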
4065 } else if (count == 1) {
4066 *eax = env->features[FEAT_XSAVE];
4067 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
4068 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
4069 const ExtSaveArea *esa = &x86_ext_save_areas[count];
4077 /* Intel Processor Trace Enumeration */
4082 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
4088 *eax = INTEL_PT_MAX_SUBLEAF;
4089 *ebx = INTEL_PT_MINIMAL_EBX;
4090 *ecx = INTEL_PT_MINIMAL_ECX;
4091 } else if (count == 1) {
4092 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
4093 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
4099 * CPUID code in kvm_arch_init_vcpu() ignores what is set here,
4100 * but we restrict this to TCG nonetheless.
4102 if (tcg_enabled() && cpu->expose_tcg) {
4103 memcpy(signature, "TCGTCGTCGTCG", 12);
4105 *ebx = signature[0];
4106 *ecx = signature[1];
4107 *edx = signature[2];
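/* The 12-byte "TCGTCGTCGTCG" string fills the conventional hypervisor
 * vendor-ID slot in EBX:ECX:EDX, so guests can detect they run under TCG.
 */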
4122 *eax = env->cpuid_xlevel;
4123 *ebx = env->cpuid_vendor1;
4124 *edx = env->cpuid_vendor2;
4125 *ecx = env->cpuid_vendor3;
4128 *eax = env->cpuid_version;
4130 *ecx = env->features[FEAT_8000_0001_ECX];
4131 *edx = env->features[FEAT_8000_0001_EDX];
4133 /* The Linux kernel checks for the CMPLegacy bit and
4134 * discards multiple thread information if it is set.
4135 * So don't set it here for Intel to make Linux guests happy.
4137 if (cs->nr_cores * cs->nr_threads > 1) {
4138 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
4139 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
4140 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
4141 *ecx |= 1 << 1; /* CmpLegacy bit */
4148 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
4149 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
4150 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
4151 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
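/* Leaves 0x80000002..0x80000004 return the 48-byte processor brand string,
 * 16 bytes per leaf, taken directly from cpuid_model[].
 */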
4154 /* cache info (L1 cache) */
4155 if (cpu->cache_info_passthrough) {
4156 host_cpuid(index, 0, eax, ebx, ecx, edx);
4159 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
4160 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
4161 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
4162 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
4163 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
4164 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
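/* AMD's fixed 0x80000005 layout: EAX/EBX describe the 2M/4M and 4K L1 TLBs,
 * ECX and EDX the L1 data and instruction caches.
 */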
4167 /* cache info (L2 cache) */
4168 if (cpu->cache_info_passthrough) {
4169 host_cpuid(index, 0, eax, ebx, ecx, edx);
4172 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
4173 (L2_DTLB_2M_ENTRIES << 16) | \
4174 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
4175 (L2_ITLB_2M_ENTRIES);
4176 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
4177 (L2_DTLB_4K_ENTRIES << 16) | \
4178 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
4179 (L2_ITLB_4K_ENTRIES);
4180 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
4181 cpu->enable_l3_cache ?
4182 env->cache_info_amd.l3_cache : NULL,
4189 *edx = env->features[FEAT_8000_0007_EDX];
4192 /* virtual & phys address size in low 2 bytes. */
4193 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4194 /* 64 bit processor */
4195 *eax = cpu->phys_bits; /* configurable physical bits */
4196 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
4197 *eax |= 0x00003900; /* 57 bits virtual */
4199 *eax |= 0x00003000; /* 48 bits virtual */
4202 *eax = cpu->phys_bits;
4204 *ebx = env->features[FEAT_8000_0008_EBX];
4207 if (cs->nr_cores * cs->nr_threads > 1) {
4208 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
4212 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4213 *eax = 0x00000001; /* SVM Revision */
4214 *ebx = 0x00000010; /* nr of ASIDs */
4216 *edx = env->features[FEAT_SVM]; /* optional features */
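/* Leaf 0x8000000A is left zeroed unless SVM was advertised in
 * CPUID[0x80000001].ECX above; EBX is the number of ASIDs usable by guests.
 */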
4227 case 0: /* L1 dcache info */
4228 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
4229 eax, ebx, ecx, edx);
4231 case 1: /* L1 icache info */
4232 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
4233 eax, ebx, ecx, edx);
4235 case 2: /* L2 cache info */
4236 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
4237 eax, ebx, ecx, edx);
4239 case 3: /* L3 cache info */
4240 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
4241 eax, ebx, ecx, edx);
4243 default: /* end of info */
4244 *eax = *ebx = *ecx = *edx = 0;
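/* Leaf 0x8000001D is AMD's cache topology leaf; its sub-leaves use the same
 * encoding as Intel's leaf 4, and a zero cache type ends the enumeration.
 */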
4249 assert(cpu->core_id <= 255);
4250 encode_topo_cpuid8000001e(cs, cpu,
4251 eax, ebx, ecx, edx);
4254 *eax = env->cpuid_xlevel2;
4260 /* Support for VIA CPU's CPUID instruction */
4261 *eax = env->cpuid_version;
4264 *edx = env->features[FEAT_C000_0001_EDX];
4269 /* Reserved for future use; currently filled with zero */
4276 *eax = sev_enabled() ? 0x2 : 0;
4277 *ebx = sev_get_cbit_position();
4278 *ebx |= sev_get_reduced_phys_bits() << 6;
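/* SEV leaf 0x8000001F: EAX bit 1 reports SEV support, EBX[5:0] the C-bit
 * position in page table entries, and EBX[11:6] the number of physical
 * address bits lost to memory encryption.
 */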
4283 /* reserved values: zero */
4292 /* CPUClass::reset() */
4293 static void x86_cpu_reset(CPUState *s)
4295 X86CPU *cpu = X86_CPU(s);
4296 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
4297 CPUX86State *env = &cpu->env;
4302 xcc->parent_reset(s);
4304 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
4306 env->old_exception = -1;
4308 /* init to reset state */
4310 env->hflags2 |= HF2_GIF_MASK;
4312 cpu_x86_update_cr0(env, 0x60000010);
4313 env->a20_mask = ~0x0;
4314 env->smbase = 0x30000;
4315 env->msr_smi_count = 0;
4317 env->idt.limit = 0xffff;
4318 env->gdt.limit = 0xffff;
4319 env->ldt.limit = 0xffff;
4320 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
4321 env->tr.limit = 0xffff;
4322 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
4324 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
4325 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
4326 DESC_R_MASK | DESC_A_MASK);
4327 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
4328 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4330 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
4331 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4333 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
4334 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4336 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
4337 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4339 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
4340 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4344 env->regs[R_EDX] = env->cpuid_version;
4349 for (i = 0; i < 8; i++) {
4352 cpu_set_fpuc(env, 0x37f);
4354 env->mxcsr = 0x1f80;
4355 /* All units are in INIT state. */
4358 env->pat = 0x0007040600070406ULL;
4359 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
4361 memset(env->dr, 0, sizeof(env->dr));
4362 env->dr[6] = DR6_FIXED_1;
4363 env->dr[7] = DR7_FIXED_1;
4364 cpu_breakpoint_remove_all(s, BP_CPU);
4365 cpu_watchpoint_remove_all(s, BP_CPU);
4368 xcr0 = XSTATE_FP_MASK;
4370 #ifdef CONFIG_USER_ONLY
4371 /* Enable all the features for user-mode. */
4372 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4373 xcr0 |= XSTATE_SSE_MASK;
4375 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4376 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4377 if (env->features[esa->feature] & esa->bits) {
4382 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
4383 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
4385 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
4386 cr4 |= CR4_FSGSBASE_MASK;
4391 cpu_x86_update_cr4(env, cr4);
4394 * SDM 11.11.5 requires:
4395 * - IA32_MTRR_DEF_TYPE MSR.E = 0
4396 * - IA32_MTRR_PHYSMASKn.V = 0
4397 * All other bits are undefined. For simplification, zero it all.
4399 env->mtrr_deftype = 0;
4400 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
4401 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
4403 env->interrupt_injected = -1;
4404 env->exception_injected = -1;
4405 env->nmi_injected = false;
4406 #if !defined(CONFIG_USER_ONLY)
4407 /* We hard-wire the BSP to the first CPU. */
4408 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
4410 s->halted = !cpu_is_bsp(cpu);
4412 if (kvm_enabled()) {
4413 kvm_arch_reset_vcpu(cpu);
4415 else if (hvf_enabled()) {
4421 #ifndef CONFIG_USER_ONLY
4422 bool cpu_is_bsp(X86CPU *cpu)
4424 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4427 /* TODO: remove me when reset over the QOM tree is implemented */
4428 static void x86_cpu_machine_reset_cb(void *opaque)
4430 X86CPU *cpu = opaque;
4431 cpu_reset(CPU(cpu));
4435 static void mce_init(X86CPU *cpu)
4437 CPUX86State *cenv = &cpu->env;
4440 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4441 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4442 (CPUID_MCE | CPUID_MCA)) {
4443 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4444 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4445 cenv->mcg_ctl = ~(uint64_t)0;
4446 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4447 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
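/* Each bank occupies four MSR slots (CTL, STATUS, ADDR, MISC); writing
 * all-ones to CTL enables reporting of every error type in that bank.
 */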
4452 #ifndef CONFIG_USER_ONLY
4453 APICCommonClass *apic_get_class(void)
4455 const char *apic_type = "apic";
4457 /* TODO: in-kernel irqchip for hvf */
4458 if (kvm_apic_in_kernel()) {
4459 apic_type = "kvm-apic";
4460 } else if (xen_enabled()) {
4461 apic_type = "xen-apic";
4464 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4467 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
4469 APICCommonState *apic;
4470 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
4472 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
4474 object_property_add_child(OBJECT(cpu), "lapic",
4475 OBJECT(cpu->apic_state), &error_abort);
4476 object_unref(OBJECT(cpu->apic_state));
4478 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
4479 /* TODO: convert to link<> */
4480 apic = APIC_COMMON(cpu->apic_state);
4482 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
4485 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4487 APICCommonState *apic;
4488 static bool apic_mmio_map_once;
4490 if (cpu->apic_state == NULL) {
4493 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
4496 /* Map APIC MMIO area */
4497 apic = APIC_COMMON(cpu->apic_state);
4498 if (!apic_mmio_map_once) {
4499 memory_region_add_subregion_overlap(get_system_memory(),
4501 MSR_IA32_APICBASE_BASE,
4504 apic_mmio_map_once = true;
4508 static void x86_cpu_machine_done(Notifier *n, void *unused)
4510 X86CPU *cpu = container_of(n, X86CPU, machine_done);
4511 MemoryRegion *smram =
4512 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4515 cpu->smram = g_new(MemoryRegion, 1);
4516 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4517 smram, 0, 1ull << 32);
4518 memory_region_set_enabled(cpu->smram, true);
4519 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
4523 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4528 /* Note: Only safe for use on x86(-64) hosts */
4529 static uint32_t x86_host_phys_bits(void)
4532 uint32_t host_phys_bits;
4534 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4535 if (eax >= 0x80000008) {
4536 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4537 /* Note: according to AMD doc 25481 rev 2.34, a field at bits 23:16 can
4538 * specify the maximum physical address bits for the guest, overriding
4539 * this value; but I've not seen anything with that field set.
4540 */
4542 host_phys_bits = eax & 0xff;
4544 /* It's an odd 64-bit machine that doesn't have the leaf for physical
4545 * address bits; fall back to 36, which most older Intel CPUs used. */
4548 host_phys_bits = 36;
4551 return host_phys_bits;
4554 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4561 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4562 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4564 CPUX86State *env = &cpu->env;
4565 FeatureWordInfo *fi = &feature_word_info[w];
4566 uint32_t eax = fi->cpuid_eax;
4567 uint32_t region = eax & 0xF0000000;
4569 if (!env->features[w]) {
4575 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4578 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4581 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4586 /* Calculate XSAVE components based on the configured CPU feature flags */
4587 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4589 CPUX86State *env = &cpu->env;
4593 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4598 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4599 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4600 if (env->features[esa->feature] & esa->bits) {
4601 mask |= (1ULL << i);
4605 env->features[FEAT_XSAVE_COMP_LO] = mask;
4606 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
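/* The 64-bit component mask is kept as two 32-bit feature words so it maps
 * directly onto the CPUID[0xD] EAX/EDX pair exposed above.
 */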
4609 /***** Steps involved in loading and filtering CPUID data
4611 * When initializing and realizing a CPU object, the steps
4612 * involved in setting up CPUID data are:
4614 * 1) Loading CPU model definition (X86CPUDefinition). This is
4615 * implemented by x86_cpu_load_def() and should be completely
4616 * transparent, as it is done automatically by instance_init.
4617 * No code should need to look at X86CPUDefinition structs
4618 * outside instance_init.
4620 * 2) CPU expansion. This is done by realize before CPUID
4621 * filtering, and will make sure host/accelerator data is
4622 * loaded for CPU models that depend on host capabilities
4623 * (e.g. "host"). Done by x86_cpu_expand_features().
4625 * 3) CPUID filtering. This initializes extra data related to
4626 * CPUID, and checks if the host supports all capabilities
4627 * required by the CPU. Runnability of a CPU model is
4628 * determined at this step. Done by x86_cpu_filter_features().
4630 * Some operations don't require all steps to be performed.
4633 * - CPU instance creation (instance_init) will run only CPU
4634 * model loading. CPU expansion can't run at instance_init-time
4635 * because host/accelerator data may be not available yet.
4636 * - CPU realization will perform both CPU model expansion and CPUID
4637 * filtering, and return an error in case one of them fails.
4638 * - query-cpu-definitions needs to run all 3 steps. It needs
4639 * to run CPUID filtering, as the 'unavailable-features'
4640 * field is set based on the filtering results.
4641 * - The query-cpu-model-expansion QMP command only needs to run
4642 * CPU model loading and CPU expansion. It should not filter
4643 * any CPUID data based on host capabilities.
4646 /* Expand CPU configuration data, based on configured features
4647 * and host/accelerator capabilities when appropriate.
4649 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4651 CPUX86State *env = &cpu->env;
4654 Error *local_err = NULL;
4656 /* TODO: Now cpu->max_features doesn't overwrite features
4657 * set using QOM properties, and we can convert
4658 * plus_features & minus_features to global properties
4659 * inside x86_cpu_parse_featurestr() too.
4661 if (cpu->max_features) {
4662 for (w = 0; w < FEATURE_WORDS; w++) {
4663 /* Override only features that weren't set explicitly by the user. */
4667 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4668 ~env->user_features[w] & \
4669 ~feature_word_info[w].no_autoenable_flags;
4673 for (l = plus_features; l; l = l->next) {
4674 const char *prop = l->data;
4675 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4681 for (l = minus_features; l; l = l->next) {
4682 const char *prop = l->data;
4683 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4689 if (!kvm_enabled() || !cpu->expose_kvm) {
4690 env->features[FEAT_KVM] = 0;
4693 x86_cpu_enable_xsave_components(cpu);
4695 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
4696 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4697 if (cpu->full_cpuid_auto_level) {
4698 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4699 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4700 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4701 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4702 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4703 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4704 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4705 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4706 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4707 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4708 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4709 /* SVM requires CPUID[0x8000000A] */
4710 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4711 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4714 /* SEV requires CPUID[0x8000001F] */
4715 if (sev_enabled()) {
4716 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4720 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4721 if (env->cpuid_level == UINT32_MAX) {
4722 env->cpuid_level = env->cpuid_min_level;
4724 if (env->cpuid_xlevel == UINT32_MAX) {
4725 env->cpuid_xlevel = env->cpuid_min_xlevel;
4727 if (env->cpuid_xlevel2 == UINT32_MAX) {
4728 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4732 if (local_err != NULL) {
4733 error_propagate(errp, local_err);
4738 * Finishes initialization of CPUID data, filters CPU feature
4739 * words based on host availability of each feature.
4741 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4743 static int x86_cpu_filter_features(X86CPU *cpu)
4745 CPUX86State *env = &cpu->env;
4749 for (w = 0; w < FEATURE_WORDS; w++) {
4750 uint32_t host_feat =
4751 x86_cpu_get_supported_feature_word(w, false);
4752 uint32_t requested_features = env->features[w];
4753 env->features[w] &= host_feat;
4754 cpu->filtered_features[w] = requested_features & ~env->features[w];
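/* filtered_features records the requested bits the accelerator cannot
 * provide; it feeds the check/enforce handling in realize and the
 * "unavailable-features" reporting mentioned earlier.
 */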
4755 if (cpu->filtered_features[w]) {
4760 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
4762 KVMState *s = CPU(cpu)->kvm_state;
4763 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
4764 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
4765 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
4766 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
4767 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
4770 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
4771 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
4772 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
4773 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
4774 INTEL_PT_ADDR_RANGES_NUM) ||
4775 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
4776 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
4777 (ecx_0 & INTEL_PT_IP_LIP)) {
4779 * Processor Trace capabilities aren't configurable, so if the
4780 * host can't emulate the capabilities we report on
4781 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
4783 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
4784 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
4792 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4793 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4794 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4795 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4796 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4797 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
4798 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4800 CPUState *cs = CPU(dev);
4801 X86CPU *cpu = X86_CPU(dev);
4802 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4803 CPUX86State *env = &cpu->env;
4804 Error *local_err = NULL;
4805 static bool ht_warned;
4807 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4808 char *name = x86_cpu_class_get_model_name(xcc);
4809 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4814 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4815 error_setg(errp, "apic-id property was not initialized properly");
4819 x86_cpu_expand_features(cpu, &local_err);
4824 if (x86_cpu_filter_features(cpu) &&
4825 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4826 x86_cpu_report_filtered_features(cpu);
4827 if (cpu->enforce_cpuid) {
4828 error_setg(&local_err,
4829 accel_uses_host_cpuid() ?
4830 "Host doesn't support requested features" :
4831 "TCG doesn't support requested features");
4836 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits in CPUID[1].EDX. */
4839 if (IS_AMD_CPU(env)) {
4840 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4841 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4842 & CPUID_EXT2_AMD_ALIASES);
4845 /* For 64-bit systems, think about the number of physical address bits to
4846 * present. Ideally this should be the same as the host; anything other than
4847 * matching the host can cause incorrect guest behaviour.
4848 * QEMU used to pick the magic value of 40 bits, which corresponds to
4849 * consumer AMD devices but nothing else. */
4851 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4852 if (accel_uses_host_cpuid()) {
4853 uint32_t host_phys_bits = x86_host_phys_bits();
4856 if (cpu->host_phys_bits) {
4857 /* The user asked for us to use the host physical bits */
4858 cpu->phys_bits = host_phys_bits;
4861 /* Print a warning if the user set it to a value that's not the host value. */
4864 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4866 warn_report("Host physical bits (%u)"
4867 " does not match phys-bits property (%u)",
4868 host_phys_bits, cpu->phys_bits);
4872 if (cpu->phys_bits &&
4873 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4874 cpu->phys_bits < 32)) {
4875 error_setg(errp, "phys-bits should be between 32 and %u "
4877 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
4881 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4882 error_setg(errp, "TCG only supports phys-bits=%u",
4883 TCG_PHYS_ADDR_BITS);
4887 /* 0 means it was not explicitly set by the user (or by machine
4888 * compat_props or by the host code above). In this case, the default
4889 * is the value used by TCG (40).
4891 if (cpu->phys_bits == 0) {
4892 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4895 /* For 32-bit systems, don't use the user-set value, but keep
4896 * phys_bits consistent with what we tell the guest. */
4898 if (cpu->phys_bits != 0) {
4899 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
4903 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4904 cpu->phys_bits = 36;
4906 cpu->phys_bits = 32;
4910 /* Cache information initialization */
4911 if (!cpu->legacy_cache) {
4912 if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
4913 char *name = x86_cpu_class_get_model_name(xcc);
4915 "CPU model '%s' doesn't support legacy-cache=off", name);
4919 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
4920 *xcc->cpu_def->cache_info;
4922 /* Build legacy cache information */
4923 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
4924 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
4925 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
4926 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
4928 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
4929 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
4930 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
4931 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
4933 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
4934 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
4935 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
4936 env->cache_info_amd.l3_cache = &legacy_l3_cache;
4940 cpu_exec_realizefn(cs, &local_err);
4941 if (local_err != NULL) {
4942 error_propagate(errp, local_err);
4946 #ifndef CONFIG_USER_ONLY
4947 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
4949 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4950 x86_cpu_apic_create(cpu, &local_err);
4951 if (local_err != NULL) {
4959 #ifndef CONFIG_USER_ONLY
4960 if (tcg_enabled()) {
4961 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4962 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4964 /* Outer container... */
4965 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4966 memory_region_set_enabled(cpu->cpu_as_root, true);
4968 /* ... with two regions inside: normal system memory with low priority, and ... */
4971 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4972 get_system_memory(), 0, ~0ull);
4973 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4974 memory_region_set_enabled(cpu->cpu_as_mem, true);
4977 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4978 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
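/* Address space 1 is used while the CPU is in SMM, so the SMRAM alias
 * installed by x86_cpu_machine_done() is invisible to normal code.
 */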
4980 /* ... SMRAM with higher priority, linked from /machine/smram. */
4981 cpu->machine_done.notify = x86_cpu_machine_done;
4982 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4989 * Most Intel and certain AMD CPUs support hyperthreading, but some AMD
4990 * families do not. Even though QEMU adjusts CPUID_0000_0001_EBX and
4991 * CPUID_8000_0008_ECX based on the -smp inputs (sockets, cores, threads), it is still better to warn users.
4994 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
4995 * cs->nr_threads hasn't been populated yet and the check is incorrect.
4997 if (IS_AMD_CPU(env) &&
4998 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
4999 cs->nr_threads > 1 && !ht_warned) {
5000 error_report("This family of AMD CPU doesn't support "
5001 "hyperthreading(%d). Please configure -smp "
5002 "options properly or try enabling topoext feature.",
5007 x86_cpu_apic_realize(cpu, &local_err);
5008 if (local_err != NULL) {
5013 xcc->parent_realize(dev, &local_err);
5016 if (local_err != NULL) {
5017 error_propagate(errp, local_err);
5022 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
5024 X86CPU *cpu = X86_CPU(dev);
5025 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5026 Error *local_err = NULL;
5028 #ifndef CONFIG_USER_ONLY
5029 cpu_remove_sync(CPU(dev));
5030 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
5033 if (cpu->apic_state) {
5034 object_unparent(OBJECT(cpu->apic_state));
5035 cpu->apic_state = NULL;
5038 xcc->parent_unrealize(dev, &local_err);
5039 if (local_err != NULL) {
5040 error_propagate(errp, local_err);
5045 typedef struct BitProperty {
5050 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5051 void *opaque, Error **errp)
5053 X86CPU *cpu = X86_CPU(obj);
5054 BitProperty *fp = opaque;
5055 uint32_t f = cpu->env.features[fp->w];
5056 bool value = (f & fp->mask) == fp->mask;
5057 visit_type_bool(v, name, &value, errp);
5060 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5061 void *opaque, Error **errp)
5063 DeviceState *dev = DEVICE(obj);
5064 X86CPU *cpu = X86_CPU(obj);
5065 BitProperty *fp = opaque;
5066 Error *local_err = NULL;
5069 if (dev->realized) {
5070 qdev_prop_set_after_realize(dev, name, errp);
5074 visit_type_bool(v, name, &value, &local_err);
5076 error_propagate(errp, local_err);
5081 cpu->env.features[fp->w] |= fp->mask;
5083 cpu->env.features[fp->w] &= ~fp->mask;
5085 cpu->env.user_features[fp->w] |= fp->mask;
5088 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5091 BitProperty *prop = opaque;
5095 /* Register a boolean property to get/set a single bit in a uint32_t field.
5097 * The same property name can be registered multiple times to make it affect
5098 * multiple bits in the same FeatureWord. In that case, the getter will return
5099 * true only if all bits are set.
5101 static void x86_cpu_register_bit_prop(X86CPU *cpu,
5102 const char *prop_name,
5108 uint32_t mask = (1UL << bitnr);
5110 op = object_property_find(OBJECT(cpu), prop_name, NULL);
5116 fp = g_new0(BitProperty, 1);
5119 object_property_add(OBJECT(cpu), prop_name, "bool",
5120 x86_cpu_get_bit_prop,
5121 x86_cpu_set_bit_prop,
5122 x86_cpu_release_bit_prop, fp, &error_abort);
5126 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5130 FeatureWordInfo *fi = &feature_word_info[w];
5131 const char *name = fi->feat_names[bitnr];
5137 /* Property names should use "-" instead of "_".
5138 * Old names containing underscores are registered as aliases
5139 * using object_property_add_alias()
5141 assert(!strchr(name, '_'));
5142 /* Aliases don't use "|" delimiters anymore; they are registered
5143 * manually using object_property_add_alias(). */
5144 assert(!strchr(name, '|'));
5145 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5148 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5150 X86CPU *cpu = X86_CPU(cs);
5151 CPUX86State *env = &cpu->env;
5152 GuestPanicInformation *panic_info = NULL;
5154 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5155 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5157 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
5159 assert(HV_CRASH_PARAMS >= 5);
5160 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5161 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5162 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5163 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5164 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5169 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5170 const char *name, void *opaque,
5173 CPUState *cs = CPU(obj);
5174 GuestPanicInformation *panic_info;
5176 if (!cs->crash_occurred) {
5177 error_setg(errp, "No crash occured");
5181 panic_info = x86_cpu_get_crash_info(cs);
5182 if (panic_info == NULL) {
5183 error_setg(errp, "No crash information");
5187 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5189 qapi_free_GuestPanicInformation(panic_info);
5192 static void x86_cpu_initfn(Object *obj)
5194 CPUState *cs = CPU(obj);
5195 X86CPU *cpu = X86_CPU(obj);
5196 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
5197 CPUX86State *env = &cpu->env;
5202 object_property_add(obj, "family", "int",
5203 x86_cpuid_version_get_family,
5204 x86_cpuid_version_set_family, NULL, NULL, NULL);
5205 object_property_add(obj, "model", "int",
5206 x86_cpuid_version_get_model,
5207 x86_cpuid_version_set_model, NULL, NULL, NULL);
5208 object_property_add(obj, "stepping", "int",
5209 x86_cpuid_version_get_stepping,
5210 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
5211 object_property_add_str(obj, "vendor",
5212 x86_cpuid_get_vendor,
5213 x86_cpuid_set_vendor, NULL);
5214 object_property_add_str(obj, "model-id",
5215 x86_cpuid_get_model_id,
5216 x86_cpuid_set_model_id, NULL);
5217 object_property_add(obj, "tsc-frequency", "int",
5218 x86_cpuid_get_tsc_freq,
5219 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
5220 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
5221 x86_cpu_get_feature_words,
5222 NULL, NULL, (void *)env->features, NULL);
5223 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
5224 x86_cpu_get_feature_words,
5225 NULL, NULL, (void *)cpu->filtered_features, NULL);
5227 object_property_add(obj, "crash-information", "GuestPanicInformation",
5228 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
5230 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
5232 for (w = 0; w < FEATURE_WORDS; w++) {
5235 for (bitnr = 0; bitnr < 32; bitnr++) {
5236 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
5240 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
5241 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
5242 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
5243 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
5244 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
5245 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
5246 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
5248 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
5249 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
5250 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
5251 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
5252 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
5253 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
5254 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
5255 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
5256 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
5257 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
5258 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
5259 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
5260 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
5261 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
5262 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
5263 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
5264 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
5265 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
5266 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
5267 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
5268 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
5271 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
5275 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5277 X86CPU *cpu = X86_CPU(cs);
5279 return cpu->apic_id;
5282 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5284 X86CPU *cpu = X86_CPU(cs);
5286 return cpu->env.cr[0] & CR0_PG_MASK;
5289 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5291 X86CPU *cpu = X86_CPU(cs);
5293 cpu->env.eip = value;
5296 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5298 X86CPU *cpu = X86_CPU(cs);
5300 cpu->env.eip = tb->pc - tb->cs_base;
5303 static bool x86_cpu_has_work(CPUState *cs)
5305 X86CPU *cpu = X86_CPU(cs);
5306 CPUX86State *env = &cpu->env;
5308 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
5309 CPU_INTERRUPT_POLL)) &&
5310 (env->eflags & IF_MASK)) ||
5311 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
5312 CPU_INTERRUPT_INIT |
5313 CPU_INTERRUPT_SIPI |
5314 CPU_INTERRUPT_MCE)) ||
5315 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
5316 !(env->hflags & HF_SMM_MASK));
5319 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5321 X86CPU *cpu = X86_CPU(cs);
5322 CPUX86State *env = &cpu->env;
5324 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5325 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5326 : bfd_mach_i386_i8086);
5327 info->print_insn = print_insn_i386;
5329 info->cap_arch = CS_ARCH_X86;
5330 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5331 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5333 info->cap_insn_unit = 1;
5334 info->cap_insn_split = 8;
5337 void x86_update_hflags(CPUX86State *env)
5340 #define HFLAG_COPY_MASK \
5341 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
5342 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
5343 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
5344 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
5346 hflags = env->hflags & HFLAG_COPY_MASK;
5347 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
5348 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
5349 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
5350 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
5351 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
5353 if (env->cr[4] & CR4_OSFXSR_MASK) {
5354 hflags |= HF_OSFXSR_MASK;
5357 if (env->efer & MSR_EFER_LMA) {
5358 hflags |= HF_LMA_MASK;
5361 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
5362 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
5364 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
5365 (DESC_B_SHIFT - HF_CS32_SHIFT);
5366 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
5367 (DESC_B_SHIFT - HF_SS32_SHIFT);
5368 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
5369 !(hflags & HF_CS32_MASK)) {
5370 hflags |= HF_ADDSEG_MASK;
5372 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
5373 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
5376 env->hflags = hflags;
5379 static Property x86_cpu_properties[] = {
5380 #ifdef CONFIG_USER_ONLY
5381 /* apic_id = 0 by default for *-user, see commit 9886e834 */
5382 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
5383 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
5384 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
5385 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
5387 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
5388 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
5389 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
5390 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
5392 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
5393 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
5394 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
5395 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
5396 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
5397 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
5398 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
5399 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
5400 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
5401 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
5402 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
5403 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
5404 DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
5405 DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
5406 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
5407 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
5408 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
5409 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
5410 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
5411 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
5412 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
5413 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
5414 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
5415 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
5416 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
5417 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
5418 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
5419 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
5420 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
5421 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
5422 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
5423 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
5425 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
5426 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
5428 * legacy_cache defaults to true unless the CPU model provides its
5429 * own cache information (see x86_cpu_load_def()).
5431 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
5434 * From "Requirements for Implementing the Microsoft
5435 * Hypervisor Interface":
5436 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
5438 * "Starting with Windows Server 2012 and Windows 8, if
5439 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
5440 * the hypervisor imposes no specific limit to the number of VPs.
5441 * In this case, Windows Server 2012 guest VMs may use more than
5442 * 64 VPs, up to the maximum supported number of processors applicable
5443 * to the specific Windows version being used."
5445 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
5446 DEFINE_PROP_END_OF_LIST()
5449 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
5451 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5452 CPUClass *cc = CPU_CLASS(oc);
5453 DeviceClass *dc = DEVICE_CLASS(oc);
5455 device_class_set_parent_realize(dc, x86_cpu_realizefn,
5456 &xcc->parent_realize);
5457 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
5458 &xcc->parent_unrealize);
5459 dc->props = x86_cpu_properties;
5461 xcc->parent_reset = cc->reset;
5462 cc->reset = x86_cpu_reset;
5463 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
5465 cc->class_by_name = x86_cpu_class_by_name;
5466 cc->parse_features = x86_cpu_parse_featurestr;
5467 cc->has_work = x86_cpu_has_work;
5469 cc->do_interrupt = x86_cpu_do_interrupt;
5470 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
5472 cc->dump_state = x86_cpu_dump_state;
5473 cc->get_crash_info = x86_cpu_get_crash_info;
5474 cc->set_pc = x86_cpu_set_pc;
5475 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
5476 cc->gdb_read_register = x86_cpu_gdb_read_register;
5477 cc->gdb_write_register = x86_cpu_gdb_write_register;
5478 cc->get_arch_id = x86_cpu_get_arch_id;
5479 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
5480 #ifdef CONFIG_USER_ONLY
5481 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
5483 cc->asidx_from_attrs = x86_asidx_from_attrs;
5484 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
5485 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
5486 cc->write_elf64_note = x86_cpu_write_elf64_note;
5487 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
5488 cc->write_elf32_note = x86_cpu_write_elf32_note;
5489 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
5490 cc->vmsd = &vmstate_x86_cpu;
5492 cc->gdb_arch_name = x86_gdb_arch_name;
5493 #ifdef TARGET_X86_64
5494 cc->gdb_core_xml_file = "i386-64bit.xml";
5495 cc->gdb_num_core_regs = 57;
5497 cc->gdb_core_xml_file = "i386-32bit.xml";
5498 cc->gdb_num_core_regs = 41;
5500 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
5501 cc->debug_excp_handler = breakpoint_handler;
5503 cc->cpu_exec_enter = x86_cpu_exec_enter;
5504 cc->cpu_exec_exit = x86_cpu_exec_exit;
5506 cc->tcg_initialize = tcg_x86_init;
5508 cc->disas_set_info = x86_disas_set_info;
5510 dc->user_creatable = true;
5513 static const TypeInfo x86_cpu_type_info = {
5514 .name = TYPE_X86_CPU,
5516 .instance_size = sizeof(X86CPU),
5517 .instance_init = x86_cpu_initfn,
5519 .class_size = sizeof(X86CPUClass),
5520 .class_init = x86_cpu_common_class_init,
5524 /* "base" CPU model, used by query-cpu-model-expansion */
5525 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
5527 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5529 xcc->static_model = true;
5530 xcc->migration_safe = true;
5531 xcc->model_description = "base CPU model type with no features enabled";
5535 static const TypeInfo x86_base_cpu_type_info = {
5536 .name = X86_CPU_TYPE_NAME("base"),
5537 .parent = TYPE_X86_CPU,
5538 .class_init = x86_cpu_base_class_init,
5541 static void x86_cpu_register_types(void)
5545 type_register_static(&x86_cpu_type_info);
5546 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5547 x86_register_cpudef_type(&builtin_x86_defs[i]);
5549 type_register_static(&max_x86_cpu_type_info);
5550 type_register_static(&x86_base_cpu_type_info);
5551 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5552 type_register_static(&host_x86_cpu_type_info);
5556 type_init(x86_cpu_register_types)