/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
26 #include "exec/exec-all.h"
27 #include "sysemu/kvm.h"
28 #include "sysemu/hvf.h"
29 #include "sysemu/cpus.h"
33 #include "qemu/error-report.h"
34 #include "qemu/option.h"
35 #include "qemu/config-file.h"
36 #include "qapi/error.h"
37 #include "qapi/qapi-visit-misc.h"
38 #include "qapi/qapi-visit-run-state.h"
39 #include "qapi/qmp/qdict.h"
40 #include "qapi/qmp/qerror.h"
41 #include "qapi/visitor.h"
42 #include "qom/qom-qobject.h"
43 #include "sysemu/arch_init.h"
45 #include "standard-headers/asm-x86/kvm_para.h"
47 #include "sysemu/sysemu.h"
48 #include "hw/qdev-properties.h"
49 #include "hw/i386/topology.h"
50 #ifndef CONFIG_USER_ONLY
51 #include "exec/address-spaces.h"
53 #include "hw/xen/xen.h"
54 #include "hw/i386/apic_internal.h"
57 #include "disas/capstone.h"
59 /* Helpers for building CPUID[2] descriptors: */
61 struct CPUID2CacheDescriptorInfo {
/*
 * Known CPUID 2 cache descriptors.
 * From Intel SDM Volume 2A, CPUID instruction
 */
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
    [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 2, .line_size = 32, },
    [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
               .associativity = 6, .line_size = 64, },
    [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 64, },
    /*
     * lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x22, 0x23 are not included
     */
    [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 16, .line_size = 64, },
    /*
     * lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x25, 0x20 are not included
     */
    [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 4, .line_size = 64, },
    [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    /* Descriptor 0x49 depends on CPU family/model, so it is not included */
    [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 24, .line_size = 64, },
    [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    /*
     * lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
     */
    [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
               .associativity = 24, .line_size = 64, },
/*
 * Sentinel descriptor value; from the Intel SDM:
 * "CPUID leaf 2 does not report cache descriptor information,
 * use CPUID leaf 4 to query cache parameters"
 */
#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
/*
 * Return a CPUID 2 cache descriptor for a given cache.
 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
 */
static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
    /* A cache must be fully described before it can be matched. */
    assert(cache->size > 0);
    assert(cache->level > 0);
    assert(cache->line_size > 0);
    assert(cache->associativity > 0);
    /* Linear scan for an exact match on every geometry field. */
    for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
        struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
        if (d->level == cache->level && d->type == cache->type &&
            d->size == cache->size && d->line_size == cache->line_size &&
            d->associativity == cache->associativity) {
    /* No table entry matched this cache's geometry. */
    return CACHE_DESCRIPTOR_UNAVAILABLE;
224 /* CPUID Leaf 4 constants: */
227 #define CACHE_TYPE_D 1
228 #define CACHE_TYPE_I 2
229 #define CACHE_TYPE_UNIFIED 3
231 #define CACHE_LEVEL(l) (l << 5)
233 #define CACHE_SELF_INIT_LEVEL (1 << 8)
236 #define CACHE_NO_INVD_SHARING (1 << 0)
237 #define CACHE_INCLUSIVE (1 << 1)
238 #define CACHE_COMPLEX_IDX (1 << 2)
240 /* Encode CacheType for CPUID[4].EAX */
241 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
242 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
243 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
244 0 /* Invalid value */)
/*
 * Encode cache info for CPUID[4].
 * Fills EAX..EDX with the Intel deterministic-cache-parameters encoding of
 * @cache; @num_apic_ids and @num_cores feed the topology fields in EAX.
 */
static void encode_cache_cpuid4(CPUCacheInfo *cache,
                                int num_apic_ids, int num_cores,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
    /* Geometry must be internally consistent: size = line * assoc * parts * sets */
    assert(cache->size == cache->line_size * cache->associativity *
           cache->partitions * cache->sets);
    assert(num_apic_ids > 0);
    /* EAX: type, level, self-init flag, and topology counts (minus one). */
    *eax = CACHE_TYPE(cache->type) |
           CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
           ((num_cores - 1) << 26) |
           ((num_apic_ids - 1) << 14);
    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    /* EBX: line size, partitions, ways — all encoded as value-minus-one. */
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);
    /* ECX: number of sets minus one. */
    assert(cache->sets > 0);
    *ecx = cache->sets - 1;
    /* EDX: sharing/inclusivity/indexing feature flags. */
    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
/* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
    /* AMD leaf 0x80000005 reports the size in whole KiB. */
    assert(cache->size % 1024 == 0);
    assert(cache->lines_per_tag > 0);
    assert(cache->associativity > 0);
    assert(cache->line_size > 0);
    /* [31:24] size in KiB, [23:16] assoc, [15:8] lines/tag, [7:0] line size */
    return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
           (cache->lines_per_tag << 8) | (cache->line_size);
/* Marker for a fully-associative cache in CPUCacheInfo.associativity */
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
/*
 * NOTE(review): only the 0/1 pass-through and fully-associative arms are
 * visible here; the intermediate power-of-two encodings (2-way..128-way)
 * appear to have been elided from this view — confirm against the full
 * table in the AMD APM before relying on this macro.
 */
#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
/*
 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
 * (L2 goes to ECX, L3 to EDX, using the AMD extended-cache encoding).
 */
static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
                                       uint32_t *ecx, uint32_t *edx)
    /* L2 size is reported in KiB. */
    assert(l2->size % 1024 == 0);
    assert(l2->associativity > 0);
    assert(l2->lines_per_tag > 0);
    assert(l2->line_size > 0);
    /* ECX: [31:16] L2 size in KiB, [15:12] assoc, [11:8] lines/tag, [7:0] line */
    *ecx = ((l2->size / 1024) << 16) |
           (AMD_ENC_ASSOC(l2->associativity) << 12) |
           (l2->lines_per_tag << 8) | (l2->line_size);
    /* L3 size is reported in 512 KiB units. */
    assert(l3->size % (512 * 1024) == 0);
    assert(l3->associativity > 0);
    assert(l3->lines_per_tag > 0);
    assert(l3->line_size > 0);
    /* EDX: [31:18] L3 size / 512 KiB, [15:12] assoc, [11:8] lines/tag, [7:0] line */
    *edx = ((l3->size / (512 * 1024)) << 18) |
           (AMD_ENC_ASSOC(l3->associativity) << 12) |
           (l3->lines_per_tag << 8) | (l3->line_size);
/*
 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
 * Please refer to the AMD64 Architecture Programmer's Manual Volume 3.
 * Define the constants to build the cpu topology. Right now, TOPOEXT
 * feature is enabled only on EPYC. So, these constants are based on
 * EPYC supported configurations. We may need to handle the cases if
 * these values change in future.
 */
/* Maximum core complexes in a node */
/* Maximum cores in a core complex */
#define MAX_CORES_IN_CCX 4
/* Maximum cores in a node */
#define MAX_CORES_IN_NODE 8
/* Maximum nodes in a socket */
#define MAX_NODES_PER_SOCKET 4
/*
 * Figure out the number of nodes required to build this config.
 * Max cores in a node is 8
 */
static int nodes_in_socket(int nr_cores)
    /* Round up: partially-filled nodes still count as a node. */
    nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
    /* Hardware does not support config with 3 nodes, return 4 in that case */
    return (nodes == 3) ? 4 : nodes;
/*
 * Decide the number of cores in a core complex with the given nr_cores using
 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
 * L3 cache is shared across all cores in a core complex. So, this will also
 * tell us how many cores are sharing the L3 cache.
 */
static int cores_in_core_complex(int nr_cores)
    /* Check if we can fit all the cores in one core complex */
    if (nr_cores <= MAX_CORES_IN_CCX) {
    /* Get the number of nodes required to build this config */
    nodes = nodes_in_socket(nr_cores);
    /*
     * Divide the cores across all the core complexes
     * Return rounded up value
     */
    return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
/*
 * Encode cache info for CPUID[8000001D] (AMD cache topology leaf).
 * Same register layout as CPUID[4] except the sharing count in EAX is
 * derived from the EPYC core-complex topology.
 */
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
    /* Geometry must be internally consistent. */
    assert(cache->size == cache->line_size * cache->associativity *
           cache->partitions * cache->sets);
    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
    /* L3 is shared among multiple cores */
    if (cache->level == 3) {
        /* EAX[25:14] = number of logical CPUs sharing this cache, minus one */
        l3_cores = cores_in_core_complex(cs->nr_cores);
        *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
        /* L1/L2 are shared only by the threads of one core */
        *eax |= ((cs->nr_threads - 1) << 14);
    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    /* EBX: line size, partitions, ways — encoded as value-minus-one. */
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);
    /* ECX: number of sets minus one. */
    assert(cache->sets > 0);
    *ecx = cache->sets - 1;
    /* EDX: sharing/inclusivity/indexing feature flags. */
    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
/* Data structure to hold the configuration info for a given core index */
struct core_topology {
    /* core complex id of the current core index */
    /*
     * Adjusted core index for this core in the topology
     * This can be 0,1,2,3 with max 4 cores in a core complex
     */
    /* Node id for this core index */
    /* Number of nodes in this config */
/*
 * Build the configuration closely match the EPYC hardware. Using the EPYC
 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
 * right now. This could change in future.
 * nr_cores : Total number of cores in the config
 * core_id : Core index of the current CPU
 * topo : Data structure to hold all the config info for this core index
 */
static void build_core_topology(int nr_cores, int core_id,
                                struct core_topology *topo)
    int nodes, cores_in_ccx;
    /* First get the number of nodes required */
    nodes = nodes_in_socket(nr_cores);
    cores_in_ccx = cores_in_core_complex(nr_cores);
    /* Decompose the flat core_id into node / complex / in-complex indices. */
    topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
    topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
    topo->core_id = core_id % cores_in_ccx;
    topo->num_nodes = nodes;
/* Encode cache info for CPUID[8000001E] */
static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
    struct core_topology topo = {0};
    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
    /*
     * CPUID_Fn8000001E_EBX
     * 15:8 Threads per core (The number of threads per core is
     *      Threads per core + 1)
     *  7:0 Core id (see bit decoding below)
     */
    if (cs->nr_threads - 1) {
        /* SMT enabled: core id packs node/ccx/core into fewer bits */
        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
               (topo.ccx_id << 2) | topo.core_id;
        /* SMT disabled: node/ccx fields shift up by one bit */
        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
    /*
     * CPUID_Fn8000001E_ECX
     * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0 Node id (see bit decoding below)
     */
    if (topo.num_nodes <= 4) {
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
        /*
         * Node id fix up. Actual hardware supports up to 4 nodes. But with
         * more than 32 cores, we may end up with more than 4 nodes.
         * Node id is a combination of socket id and node id. Only requirement
         * here is that this number should be unique across the system.
         * Shift the socket id to accommodate more nodes. We don't expect both
         * socket id and node id to be big number at the same time. This is not
         * an ideal config but we need to support it. Max nodes we can have
         * is 32 (255/8) with 8 cores per node and 255 max cores. We only need
         * 5 bits for nodes. Find the left most set bit to represent the total
         * number of nodes. find_last_bit returns last set bit(0 based). Left
         * shift(+1) the socket id to represent all the nodes.
         */
        nodes = topo.num_nodes - 1;
        shift = find_last_bit(&nodes, 8);
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

static CPUCacheInfo legacy_l1d_cache = {
    .no_invd_sharing = true,

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .no_invd_sharing = true,

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .no_invd_sharing = true,

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .no_invd_sharing = true,

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .no_invd_sharing = true,

/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,

/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .complex_indexing = true,
/* TLB definitions: */

/* L1 data TLB */
#define L1_DTLB_2M_ASSOC 1
#define L1_DTLB_2M_ENTRIES 255
#define L1_DTLB_4K_ASSOC 1
#define L1_DTLB_4K_ENTRIES 255

/* L1 instruction TLB */
#define L1_ITLB_2M_ASSOC 1
#define L1_ITLB_2M_ENTRIES 255
#define L1_ITLB_4K_ASSOC 1
#define L1_ITLB_4K_ENTRIES 255

/* L2 data TLB */
#define L2_DTLB_2M_ASSOC 0 /* disabled */
#define L2_DTLB_2M_ENTRIES 0 /* disabled */
#define L2_DTLB_4K_ASSOC 4
#define L2_DTLB_4K_ENTRIES 512

/* L2 instruction TLB */
#define L2_ITLB_2M_ASSOC 0 /* disabled */
#define L2_ITLB_2M_ENTRIES 0 /* disabled */
#define L2_ITLB_4K_ASSOC 4
#define L2_ITLB_4K_ENTRIES 512
/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF 0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX 0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX 0x7
/* generated packets which contain IP payloads have LIP values */
/* Unsigned shift: (1 << 31) overflows a 32-bit int (undefined behavior) */
#define INTEL_PT_IP_LIP (1u << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
/*
 * Assemble the 12-character CPUID vendor string from the three 32-bit
 * registers (EBX/EDX/ECX order as passed by the caller), least-significant
 * byte first, and NUL-terminate it at dst[CPUID_VENDOR_SZ].
 */
static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
    for (i = 0; i < 4; i++) {
        /* Extract byte i of each word; truncation to char is intentional. */
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    dst[CPUID_VENDOR_SZ] = '\0';
/* Baseline feature sets for the built-in legacy CPU models: */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* CPUID[1].EDX feature bits the TCG emulator can actually provide: */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
/* partly implemented:
   CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
/* missing:
   CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
/* missing:
   CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
   CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
   CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
   CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
   CPUID_EXT_F16C, CPUID_EXT_RDRAND */

/*
 * NOTE(review): the next two definitions look like the two arms of an
 * elided #ifdef TARGET_X86_64 / #else conditional — confirm the guard
 * lines in the full file; as shown they would redefine the macro.
 */
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#define TCG_EXT2_X86_64_FEATURES 0

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES CPUID_SVM_NPT
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
          /* CPUID_7_0_ECX_OSPKE is dynamic */ \
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
/* missing:
   CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Per-feature-word metadata: names, CPUID lookup key, and capability masks. */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
791 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
794 "fpu", "vme", "de", "pse",
795 "tsc", "msr", "pae", "mce",
796 "cx8", "apic", NULL, "sep",
797 "mtrr", "pge", "mca", "cmov",
798 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
799 NULL, "ds" /* Intel dts */, "acpi", "mmx",
800 "fxsr", "sse", "sse2", "ss",
801 "ht" /* Intel htt */, "tm", "ia64", "pbe",
803 .cpuid_eax = 1, .cpuid_reg = R_EDX,
804 .tcg_features = TCG_FEATURES,
808 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
809 "ds-cpl", "vmx", "smx", "est",
810 "tm2", "ssse3", "cid", NULL,
811 "fma", "cx16", "xtpr", "pdcm",
812 NULL, "pcid", "dca", "sse4.1",
813 "sse4.2", "x2apic", "movbe", "popcnt",
814 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
815 "avx", "f16c", "rdrand", "hypervisor",
817 .cpuid_eax = 1, .cpuid_reg = R_ECX,
818 .tcg_features = TCG_EXT_FEATURES,
820 /* Feature names that are already defined on feature_name[] but
821 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
822 * names on feat_names below. They are copied automatically
823 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
825 [FEAT_8000_0001_EDX] = {
827 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
828 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
829 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
830 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
831 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
832 "nx", NULL, "mmxext", NULL /* mmx */,
833 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
834 NULL, "lm", "3dnowext", "3dnow",
836 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
837 .tcg_features = TCG_EXT2_FEATURES,
839 [FEAT_8000_0001_ECX] = {
841 "lahf-lm", "cmp-legacy", "svm", "extapic",
842 "cr8legacy", "abm", "sse4a", "misalignsse",
843 "3dnowprefetch", "osvw", "ibs", "xop",
844 "skinit", "wdt", NULL, "lwp",
845 "fma4", "tce", NULL, "nodeid-msr",
846 NULL, "tbm", "topoext", "perfctr-core",
847 "perfctr-nb", NULL, NULL, NULL,
848 NULL, NULL, NULL, NULL,
850 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
851 .tcg_features = TCG_EXT3_FEATURES,
853 [FEAT_C000_0001_EDX] = {
855 NULL, NULL, "xstore", "xstore-en",
856 NULL, NULL, "xcrypt", "xcrypt-en",
857 "ace2", "ace2-en", "phe", "phe-en",
858 "pmm", "pmm-en", NULL, NULL,
859 NULL, NULL, NULL, NULL,
860 NULL, NULL, NULL, NULL,
861 NULL, NULL, NULL, NULL,
862 NULL, NULL, NULL, NULL,
864 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
865 .tcg_features = TCG_EXT4_FEATURES,
869 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
870 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
871 NULL, "kvm-pv-tlb-flush", NULL, NULL,
872 NULL, NULL, NULL, NULL,
873 NULL, NULL, NULL, NULL,
874 NULL, NULL, NULL, NULL,
875 "kvmclock-stable-bit", NULL, NULL, NULL,
876 NULL, NULL, NULL, NULL,
878 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
879 .tcg_features = TCG_KVM_FEATURES,
883 "kvm-hint-dedicated", NULL, NULL, NULL,
884 NULL, NULL, NULL, NULL,
885 NULL, NULL, NULL, NULL,
886 NULL, NULL, NULL, NULL,
887 NULL, NULL, NULL, NULL,
888 NULL, NULL, NULL, NULL,
889 NULL, NULL, NULL, NULL,
890 NULL, NULL, NULL, NULL,
892 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
893 .tcg_features = TCG_KVM_FEATURES,
895 * KVM hints aren't auto-enabled by -cpu host, they need to be
896 * explicitly enabled in the command-line.
898 .no_autoenable_flags = ~0U,
900 [FEAT_HYPERV_EAX] = {
902 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
903 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
904 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
905 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
906 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
907 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
908 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
910 NULL, NULL, NULL, NULL,
911 NULL, NULL, NULL, NULL,
912 NULL, NULL, NULL, NULL,
913 NULL, NULL, NULL, NULL,
915 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
917 [FEAT_HYPERV_EBX] = {
919 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
920 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
921 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
922 NULL /* hv_create_port */, NULL /* hv_connect_port */,
923 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
924 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
926 NULL, NULL, NULL, NULL,
927 NULL, NULL, NULL, NULL,
928 NULL, NULL, NULL, NULL,
929 NULL, NULL, NULL, NULL,
931 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
933 [FEAT_HYPERV_EDX] = {
935 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
936 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
937 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
939 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
940 NULL, NULL, NULL, NULL,
941 NULL, NULL, NULL, NULL,
942 NULL, NULL, NULL, NULL,
943 NULL, NULL, NULL, NULL,
944 NULL, NULL, NULL, NULL,
946 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
950 "npt", "lbrv", "svm-lock", "nrip-save",
951 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
952 NULL, NULL, "pause-filter", NULL,
953 "pfthreshold", NULL, NULL, NULL,
954 NULL, NULL, NULL, NULL,
955 NULL, NULL, NULL, NULL,
956 NULL, NULL, NULL, NULL,
957 NULL, NULL, NULL, NULL,
959 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
960 .tcg_features = TCG_SVM_FEATURES,
964 "fsgsbase", "tsc-adjust", NULL, "bmi1",
965 "hle", "avx2", NULL, "smep",
966 "bmi2", "erms", "invpcid", "rtm",
967 NULL, NULL, "mpx", NULL,
968 "avx512f", "avx512dq", "rdseed", "adx",
969 "smap", "avx512ifma", "pcommit", "clflushopt",
970 "clwb", "intel-pt", "avx512pf", "avx512er",
971 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
974 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
976 .tcg_features = TCG_7_0_EBX_FEATURES,
980 NULL, "avx512vbmi", "umip", "pku",
981 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
982 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
983 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
984 "la57", NULL, NULL, NULL,
985 NULL, NULL, "rdpid", NULL,
986 NULL, "cldemote", NULL, NULL,
987 NULL, NULL, NULL, NULL,
990 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
992 .tcg_features = TCG_7_0_ECX_FEATURES,
996 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
997 NULL, NULL, NULL, NULL,
998 NULL, NULL, NULL, NULL,
999 NULL, NULL, NULL, NULL,
1000 NULL, NULL, "pconfig", NULL,
1001 NULL, NULL, NULL, NULL,
1002 NULL, NULL, "spec-ctrl", NULL,
1003 NULL, "arch-capabilities", NULL, "ssbd",
1006 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1008 .tcg_features = TCG_7_0_EDX_FEATURES,
1009 .unmigratable_flags = CPUID_7_0_EDX_ARCH_CAPABILITIES,
1011 [FEAT_8000_0007_EDX] = {
1013 NULL, NULL, NULL, NULL,
1014 NULL, NULL, NULL, NULL,
1015 "invtsc", NULL, NULL, NULL,
1016 NULL, NULL, NULL, NULL,
1017 NULL, NULL, NULL, NULL,
1018 NULL, NULL, NULL, NULL,
1019 NULL, NULL, NULL, NULL,
1020 NULL, NULL, NULL, NULL,
1022 .cpuid_eax = 0x80000007,
1024 .tcg_features = TCG_APM_FEATURES,
1025 .unmigratable_flags = CPUID_APM_INVTSC,
1027 [FEAT_8000_0008_EBX] = {
1029 NULL, NULL, NULL, NULL,
1030 NULL, NULL, NULL, NULL,
1031 NULL, "wbnoinvd", NULL, NULL,
1032 "ibpb", NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL,
1034 NULL, NULL, NULL, NULL,
1035 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1036 NULL, NULL, NULL, NULL,
1038 .cpuid_eax = 0x80000008,
1041 .unmigratable_flags = 0,
1045 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1046 NULL, NULL, NULL, NULL,
1047 NULL, NULL, NULL, NULL,
1048 NULL, NULL, NULL, NULL,
1049 NULL, NULL, NULL, NULL,
1050 NULL, NULL, NULL, NULL,
1051 NULL, NULL, NULL, NULL,
1052 NULL, NULL, NULL, NULL,
1055 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
1057 .tcg_features = TCG_XSAVE_FEATURES,
1061 NULL, NULL, "arat", NULL,
1062 NULL, NULL, NULL, NULL,
1063 NULL, NULL, NULL, NULL,
1064 NULL, NULL, NULL, NULL,
1065 NULL, NULL, NULL, NULL,
1066 NULL, NULL, NULL, NULL,
1067 NULL, NULL, NULL, NULL,
1068 NULL, NULL, NULL, NULL,
1070 .cpuid_eax = 6, .cpuid_reg = R_EAX,
1071 .tcg_features = TCG_6_EAX_FEATURES,
1073 [FEAT_XSAVE_COMP_LO] = {
1075 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1077 .tcg_features = ~0U,
1078 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1079 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1080 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1083 [FEAT_XSAVE_COMP_HI] = {
1085 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1087 .tcg_features = ~0U,
1091 typedef struct X86RegisterInfo32 {
1092 /* Name of register */
1094 /* QAPI enum value register */
1095 X86CPURegister32 qapi_enum;
1096 } X86RegisterInfo32;
1098 #define REGISTER(reg) \
1099 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1100 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
/*
 * One XSAVE state component: the CPUID feature flag that enables it,
 * plus its offset and size within the XSAVE area (CPUID leaf 0xD).
 */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;
1117 static const ExtSaveArea x86_ext_save_areas[] = {
1119 /* x87 FP state component is always enabled if XSAVE is supported */
1120 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1121 /* x87 state is in the legacy region of the XSAVE area */
1123 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1125 [XSTATE_SSE_BIT] = {
1126 /* SSE state component is always enabled if XSAVE is supported */
1127 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1128 /* SSE state is in the legacy region of the XSAVE area */
1130 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1133 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1134 .offset = offsetof(X86XSaveArea, avx_state),
1135 .size = sizeof(XSaveAVX) },
1136 [XSTATE_BNDREGS_BIT] =
1137 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1138 .offset = offsetof(X86XSaveArea, bndreg_state),
1139 .size = sizeof(XSaveBNDREG) },
1140 [XSTATE_BNDCSR_BIT] =
1141 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1142 .offset = offsetof(X86XSaveArea, bndcsr_state),
1143 .size = sizeof(XSaveBNDCSR) },
1144 [XSTATE_OPMASK_BIT] =
1145 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1146 .offset = offsetof(X86XSaveArea, opmask_state),
1147 .size = sizeof(XSaveOpmask) },
1148 [XSTATE_ZMM_Hi256_BIT] =
1149 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1150 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1151 .size = sizeof(XSaveZMM_Hi256) },
1152 [XSTATE_Hi16_ZMM_BIT] =
1153 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1154 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1155 .size = sizeof(XSaveHi16_ZMM) },
1157 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1158 .offset = offsetof(X86XSaveArea, pkru_state),
1159 .size = sizeof(XSavePKRU) },
1162 static uint32_t xsave_area_size(uint64_t mask)
1167 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1168 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1169 if ((mask >> i) & 1) {
1170 ret = MAX(ret, esa->offset + esa->size);
1176 static inline bool accel_uses_host_cpuid(void)
1178 return kvm_enabled() || hvf_enabled();
1181 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1183 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1184 cpu->env.features[FEAT_XSAVE_COMP_LO];
1187 const char *get_register_name_32(unsigned int reg)
1189 if (reg >= CPU_NB_REGS32) {
1192 return x86_reg_info_32[reg].name;
1196 * Returns the set of feature flags that are supported and migratable by
1197 * QEMU, for a given FeatureWord.
1199 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1201 FeatureWordInfo *wi = &feature_word_info[w];
1205 for (i = 0; i < 32; i++) {
1206 uint32_t f = 1U << i;
1208 /* If the feature name is known, it is implicitly considered migratable,
1209 * unless it is explicitly set in unmigratable_flags */
1210 if ((wi->migratable_flags & f) ||
1211 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1218 void host_cpuid(uint32_t function, uint32_t count,
1219 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1224 asm volatile("cpuid"
1225 : "=a"(vec[0]), "=b"(vec[1]),
1226 "=c"(vec[2]), "=d"(vec[3])
1227 : "0"(function), "c"(count) : "cc");
1228 #elif defined(__i386__)
1229 asm volatile("pusha \n\t"
1231 "mov %%eax, 0(%2) \n\t"
1232 "mov %%ebx, 4(%2) \n\t"
1233 "mov %%ecx, 8(%2) \n\t"
1234 "mov %%edx, 12(%2) \n\t"
1236 : : "a"(function), "c"(count), "S"(vec)
/*
 * Fill @vendor (at least CPUID_VENDOR_SZ + 1 bytes) with the host CPU
 * vendor string and, for each non-NULL pointer, store the decoded
 * family/model/stepping from host CPUID leaf 1 EAX.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        /* displayed family = base family + extended family */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        /* displayed model = base model | (extended model << 4) */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}
1271 /* CPU class name definitions: */
/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
1281 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1284 char *typename = x86_cpu_type_name(cpu_model);
1285 oc = object_class_by_name(typename);
1290 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1292 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1293 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1294 return g_strndup(class_name,
1295 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1298 struct X86CPUDefinition {
1302 /* vendor is zero-terminated, 12 character ASCII string */
1303 char vendor[CPUID_VENDOR_SZ + 1];
1307 FeatureWordArray features;
1308 const char *model_id;
1309 CPUCaches *cache_info;
1312 static CPUCaches epyc_cache_info = {
1313 .l1d_cache = &(CPUCacheInfo) {
1323 .no_invd_sharing = true,
1325 .l1i_cache = &(CPUCacheInfo) {
1326 .type = INSTRUCTION_CACHE,
1335 .no_invd_sharing = true,
1337 .l2_cache = &(CPUCacheInfo) {
1338 .type = UNIFIED_CACHE,
1347 .l3_cache = &(CPUCacheInfo) {
1348 .type = UNIFIED_CACHE,
1352 .associativity = 16,
1358 .complex_indexing = true,
1362 static X86CPUDefinition builtin_x86_defs[] = {
1366 .vendor = CPUID_VENDOR_AMD,
1370 .features[FEAT_1_EDX] =
1372 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1374 .features[FEAT_1_ECX] =
1375 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1376 .features[FEAT_8000_0001_EDX] =
1377 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1378 .features[FEAT_8000_0001_ECX] =
1379 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1380 .xlevel = 0x8000000A,
1381 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1386 .vendor = CPUID_VENDOR_AMD,
1390 /* Missing: CPUID_HT */
1391 .features[FEAT_1_EDX] =
1393 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1394 CPUID_PSE36 | CPUID_VME,
1395 .features[FEAT_1_ECX] =
1396 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1398 .features[FEAT_8000_0001_EDX] =
1399 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1400 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1401 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1402 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1404 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1405 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1406 .features[FEAT_8000_0001_ECX] =
1407 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1408 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1409 /* Missing: CPUID_SVM_LBRV */
1410 .features[FEAT_SVM] =
1412 .xlevel = 0x8000001A,
1413 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1418 .vendor = CPUID_VENDOR_INTEL,
1422 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1423 .features[FEAT_1_EDX] =
1425 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1426 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1427 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1428 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1429 .features[FEAT_1_ECX] =
1430 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1432 .features[FEAT_8000_0001_EDX] =
1433 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1434 .features[FEAT_8000_0001_ECX] =
1436 .xlevel = 0x80000008,
1437 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1442 .vendor = CPUID_VENDOR_INTEL,
1446 /* Missing: CPUID_HT */
1447 .features[FEAT_1_EDX] =
1448 PPRO_FEATURES | CPUID_VME |
1449 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1451 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1452 .features[FEAT_1_ECX] =
1453 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1454 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1455 .features[FEAT_8000_0001_EDX] =
1456 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1457 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1458 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1459 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1460 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1461 .features[FEAT_8000_0001_ECX] =
1463 .xlevel = 0x80000008,
1464 .model_id = "Common KVM processor"
1469 .vendor = CPUID_VENDOR_INTEL,
1473 .features[FEAT_1_EDX] =
1475 .features[FEAT_1_ECX] =
1477 .xlevel = 0x80000004,
1478 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1483 .vendor = CPUID_VENDOR_INTEL,
1487 .features[FEAT_1_EDX] =
1488 PPRO_FEATURES | CPUID_VME |
1489 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1490 .features[FEAT_1_ECX] =
1492 .features[FEAT_8000_0001_ECX] =
1494 .xlevel = 0x80000008,
1495 .model_id = "Common 32-bit KVM processor"
1500 .vendor = CPUID_VENDOR_INTEL,
1504 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1505 .features[FEAT_1_EDX] =
1506 PPRO_FEATURES | CPUID_VME |
1507 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1509 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1510 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1511 .features[FEAT_1_ECX] =
1512 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1513 .features[FEAT_8000_0001_EDX] =
1515 .xlevel = 0x80000008,
1516 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1521 .vendor = CPUID_VENDOR_INTEL,
1525 .features[FEAT_1_EDX] =
1533 .vendor = CPUID_VENDOR_INTEL,
1537 .features[FEAT_1_EDX] =
1545 .vendor = CPUID_VENDOR_INTEL,
1549 .features[FEAT_1_EDX] =
1557 .vendor = CPUID_VENDOR_INTEL,
1561 .features[FEAT_1_EDX] =
1569 .vendor = CPUID_VENDOR_AMD,
1573 .features[FEAT_1_EDX] =
1574 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1576 .features[FEAT_8000_0001_EDX] =
1577 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1578 .xlevel = 0x80000008,
1579 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1584 .vendor = CPUID_VENDOR_INTEL,
1588 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1589 .features[FEAT_1_EDX] =
1591 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1592 CPUID_ACPI | CPUID_SS,
1593 /* Some CPUs got no CPUID_SEP */
1594 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1596 .features[FEAT_1_ECX] =
1597 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1599 .features[FEAT_8000_0001_EDX] =
1601 .features[FEAT_8000_0001_ECX] =
1603 .xlevel = 0x80000008,
1604 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1609 .vendor = CPUID_VENDOR_INTEL,
1613 .features[FEAT_1_EDX] =
1614 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1615 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1616 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1617 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1618 CPUID_DE | CPUID_FP87,
1619 .features[FEAT_1_ECX] =
1620 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1621 .features[FEAT_8000_0001_EDX] =
1622 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1623 .features[FEAT_8000_0001_ECX] =
1625 .xlevel = 0x80000008,
1626 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1631 .vendor = CPUID_VENDOR_INTEL,
1635 .features[FEAT_1_EDX] =
1636 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1637 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1638 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1639 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1640 CPUID_DE | CPUID_FP87,
1641 .features[FEAT_1_ECX] =
1642 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1644 .features[FEAT_8000_0001_EDX] =
1645 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1646 .features[FEAT_8000_0001_ECX] =
1648 .xlevel = 0x80000008,
1649 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1654 .vendor = CPUID_VENDOR_INTEL,
1658 .features[FEAT_1_EDX] =
1659 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1660 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1661 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1662 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1663 CPUID_DE | CPUID_FP87,
1664 .features[FEAT_1_ECX] =
1665 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1666 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1667 .features[FEAT_8000_0001_EDX] =
1668 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1669 .features[FEAT_8000_0001_ECX] =
1671 .xlevel = 0x80000008,
1672 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1675 .name = "Nehalem-IBRS",
1677 .vendor = CPUID_VENDOR_INTEL,
1681 .features[FEAT_1_EDX] =
1682 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1683 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1684 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1685 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1686 CPUID_DE | CPUID_FP87,
1687 .features[FEAT_1_ECX] =
1688 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1689 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1690 .features[FEAT_7_0_EDX] =
1691 CPUID_7_0_EDX_SPEC_CTRL,
1692 .features[FEAT_8000_0001_EDX] =
1693 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1694 .features[FEAT_8000_0001_ECX] =
1696 .xlevel = 0x80000008,
1697 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1702 .vendor = CPUID_VENDOR_INTEL,
1706 .features[FEAT_1_EDX] =
1707 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1708 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1709 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1710 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1711 CPUID_DE | CPUID_FP87,
1712 .features[FEAT_1_ECX] =
1713 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1714 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1715 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1716 .features[FEAT_8000_0001_EDX] =
1717 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1718 .features[FEAT_8000_0001_ECX] =
1720 .features[FEAT_6_EAX] =
1722 .xlevel = 0x80000008,
1723 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1726 .name = "Westmere-IBRS",
1728 .vendor = CPUID_VENDOR_INTEL,
1732 .features[FEAT_1_EDX] =
1733 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1734 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1735 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1736 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1737 CPUID_DE | CPUID_FP87,
1738 .features[FEAT_1_ECX] =
1739 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1740 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1741 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1742 .features[FEAT_8000_0001_EDX] =
1743 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1744 .features[FEAT_8000_0001_ECX] =
1746 .features[FEAT_7_0_EDX] =
1747 CPUID_7_0_EDX_SPEC_CTRL,
1748 .features[FEAT_6_EAX] =
1750 .xlevel = 0x80000008,
1751 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1754 .name = "SandyBridge",
1756 .vendor = CPUID_VENDOR_INTEL,
1760 .features[FEAT_1_EDX] =
1761 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1762 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1763 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1764 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1765 CPUID_DE | CPUID_FP87,
1766 .features[FEAT_1_ECX] =
1767 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1768 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1769 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1770 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1772 .features[FEAT_8000_0001_EDX] =
1773 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1775 .features[FEAT_8000_0001_ECX] =
1777 .features[FEAT_XSAVE] =
1778 CPUID_XSAVE_XSAVEOPT,
1779 .features[FEAT_6_EAX] =
1781 .xlevel = 0x80000008,
1782 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1785 .name = "SandyBridge-IBRS",
1787 .vendor = CPUID_VENDOR_INTEL,
1791 .features[FEAT_1_EDX] =
1792 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1793 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1794 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1795 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1796 CPUID_DE | CPUID_FP87,
1797 .features[FEAT_1_ECX] =
1798 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1799 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1800 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1801 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1803 .features[FEAT_8000_0001_EDX] =
1804 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1806 .features[FEAT_8000_0001_ECX] =
1808 .features[FEAT_7_0_EDX] =
1809 CPUID_7_0_EDX_SPEC_CTRL,
1810 .features[FEAT_XSAVE] =
1811 CPUID_XSAVE_XSAVEOPT,
1812 .features[FEAT_6_EAX] =
1814 .xlevel = 0x80000008,
1815 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1818 .name = "IvyBridge",
1820 .vendor = CPUID_VENDOR_INTEL,
1824 .features[FEAT_1_EDX] =
1825 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1826 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1827 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1828 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1829 CPUID_DE | CPUID_FP87,
1830 .features[FEAT_1_ECX] =
1831 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1832 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1833 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1834 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1835 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1836 .features[FEAT_7_0_EBX] =
1837 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1839 .features[FEAT_8000_0001_EDX] =
1840 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1842 .features[FEAT_8000_0001_ECX] =
1844 .features[FEAT_XSAVE] =
1845 CPUID_XSAVE_XSAVEOPT,
1846 .features[FEAT_6_EAX] =
1848 .xlevel = 0x80000008,
1849 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1852 .name = "IvyBridge-IBRS",
1854 .vendor = CPUID_VENDOR_INTEL,
1858 .features[FEAT_1_EDX] =
1859 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1860 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1861 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1862 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1863 CPUID_DE | CPUID_FP87,
1864 .features[FEAT_1_ECX] =
1865 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1866 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1867 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1868 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1869 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1870 .features[FEAT_7_0_EBX] =
1871 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1873 .features[FEAT_8000_0001_EDX] =
1874 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1876 .features[FEAT_8000_0001_ECX] =
1878 .features[FEAT_7_0_EDX] =
1879 CPUID_7_0_EDX_SPEC_CTRL,
1880 .features[FEAT_XSAVE] =
1881 CPUID_XSAVE_XSAVEOPT,
1882 .features[FEAT_6_EAX] =
1884 .xlevel = 0x80000008,
1885 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1888 .name = "Haswell-noTSX",
1890 .vendor = CPUID_VENDOR_INTEL,
1894 .features[FEAT_1_EDX] =
1895 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1896 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1897 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1898 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1899 CPUID_DE | CPUID_FP87,
1900 .features[FEAT_1_ECX] =
1901 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1902 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1903 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1904 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1905 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1906 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1907 .features[FEAT_8000_0001_EDX] =
1908 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1910 .features[FEAT_8000_0001_ECX] =
1911 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1912 .features[FEAT_7_0_EBX] =
1913 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1914 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1915 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1916 .features[FEAT_XSAVE] =
1917 CPUID_XSAVE_XSAVEOPT,
1918 .features[FEAT_6_EAX] =
1920 .xlevel = 0x80000008,
1921 .model_id = "Intel Core Processor (Haswell, no TSX)",
1924 .name = "Haswell-noTSX-IBRS",
1926 .vendor = CPUID_VENDOR_INTEL,
1930 .features[FEAT_1_EDX] =
1931 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1932 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1933 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1934 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1935 CPUID_DE | CPUID_FP87,
1936 .features[FEAT_1_ECX] =
1937 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1938 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1939 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1940 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1941 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1942 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1943 .features[FEAT_8000_0001_EDX] =
1944 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1946 .features[FEAT_8000_0001_ECX] =
1947 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1948 .features[FEAT_7_0_EDX] =
1949 CPUID_7_0_EDX_SPEC_CTRL,
1950 .features[FEAT_7_0_EBX] =
1951 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1952 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1953 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1954 .features[FEAT_XSAVE] =
1955 CPUID_XSAVE_XSAVEOPT,
1956 .features[FEAT_6_EAX] =
1958 .xlevel = 0x80000008,
1959 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1964 .vendor = CPUID_VENDOR_INTEL,
1968 .features[FEAT_1_EDX] =
1969 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1970 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1971 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1972 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1973 CPUID_DE | CPUID_FP87,
1974 .features[FEAT_1_ECX] =
1975 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1976 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1977 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1978 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1979 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1980 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1981 .features[FEAT_8000_0001_EDX] =
1982 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1984 .features[FEAT_8000_0001_ECX] =
1985 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1986 .features[FEAT_7_0_EBX] =
1987 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1988 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1989 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1991 .features[FEAT_XSAVE] =
1992 CPUID_XSAVE_XSAVEOPT,
1993 .features[FEAT_6_EAX] =
1995 .xlevel = 0x80000008,
1996 .model_id = "Intel Core Processor (Haswell)",
1999 .name = "Haswell-IBRS",
2001 .vendor = CPUID_VENDOR_INTEL,
2005 .features[FEAT_1_EDX] =
2006 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2007 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2008 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2009 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2010 CPUID_DE | CPUID_FP87,
2011 .features[FEAT_1_ECX] =
2012 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2013 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2014 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2015 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2016 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2017 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2018 .features[FEAT_8000_0001_EDX] =
2019 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2021 .features[FEAT_8000_0001_ECX] =
2022 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2023 .features[FEAT_7_0_EDX] =
2024 CPUID_7_0_EDX_SPEC_CTRL,
2025 .features[FEAT_7_0_EBX] =
2026 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2027 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2028 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2030 .features[FEAT_XSAVE] =
2031 CPUID_XSAVE_XSAVEOPT,
2032 .features[FEAT_6_EAX] =
2034 .xlevel = 0x80000008,
2035 .model_id = "Intel Core Processor (Haswell, IBRS)",
2038 .name = "Broadwell-noTSX",
2040 .vendor = CPUID_VENDOR_INTEL,
2044 .features[FEAT_1_EDX] =
2045 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2046 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2047 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2048 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2049 CPUID_DE | CPUID_FP87,
2050 .features[FEAT_1_ECX] =
2051 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2052 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2053 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2054 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2055 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2056 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2057 .features[FEAT_8000_0001_EDX] =
2058 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2060 .features[FEAT_8000_0001_ECX] =
2061 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2062 .features[FEAT_7_0_EBX] =
2063 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2064 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2065 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2066 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2068 .features[FEAT_XSAVE] =
2069 CPUID_XSAVE_XSAVEOPT,
2070 .features[FEAT_6_EAX] =
2072 .xlevel = 0x80000008,
2073 .model_id = "Intel Core Processor (Broadwell, no TSX)",
2076 .name = "Broadwell-noTSX-IBRS",
2078 .vendor = CPUID_VENDOR_INTEL,
2082 .features[FEAT_1_EDX] =
2083 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2084 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2085 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2086 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2087 CPUID_DE | CPUID_FP87,
2088 .features[FEAT_1_ECX] =
2089 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2090 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2091 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2092 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2093 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2094 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2095 .features[FEAT_8000_0001_EDX] =
2096 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2098 .features[FEAT_8000_0001_ECX] =
2099 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2100 .features[FEAT_7_0_EDX] =
2101 CPUID_7_0_EDX_SPEC_CTRL,
2102 .features[FEAT_7_0_EBX] =
2103 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2104 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2105 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2106 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2108 .features[FEAT_XSAVE] =
2109 CPUID_XSAVE_XSAVEOPT,
2110 .features[FEAT_6_EAX] =
2112 .xlevel = 0x80000008,
2113 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
2116 .name = "Broadwell",
2118 .vendor = CPUID_VENDOR_INTEL,
2122 .features[FEAT_1_EDX] =
2123 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2124 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2125 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2126 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2127 CPUID_DE | CPUID_FP87,
2128 .features[FEAT_1_ECX] =
2129 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2130 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2131 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2132 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2133 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2134 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2135 .features[FEAT_8000_0001_EDX] =
2136 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2138 .features[FEAT_8000_0001_ECX] =
2139 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2140 .features[FEAT_7_0_EBX] =
2141 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2142 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2143 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2144 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2146 .features[FEAT_XSAVE] =
2147 CPUID_XSAVE_XSAVEOPT,
2148 .features[FEAT_6_EAX] =
2150 .xlevel = 0x80000008,
2151 .model_id = "Intel Core Processor (Broadwell)",
2154 .name = "Broadwell-IBRS",
2156 .vendor = CPUID_VENDOR_INTEL,
2160 .features[FEAT_1_EDX] =
2161 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2162 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2163 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2164 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2165 CPUID_DE | CPUID_FP87,
2166 .features[FEAT_1_ECX] =
2167 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2168 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2169 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2170 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2171 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2172 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2173 .features[FEAT_8000_0001_EDX] =
2174 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2176 .features[FEAT_8000_0001_ECX] =
2177 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2178 .features[FEAT_7_0_EDX] =
2179 CPUID_7_0_EDX_SPEC_CTRL,
2180 .features[FEAT_7_0_EBX] =
2181 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2182 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2183 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2184 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2186 .features[FEAT_XSAVE] =
2187 CPUID_XSAVE_XSAVEOPT,
2188 .features[FEAT_6_EAX] =
2190 .xlevel = 0x80000008,
2191 .model_id = "Intel Core Processor (Broadwell, IBRS)",
2194 .name = "Skylake-Client",
2196 .vendor = CPUID_VENDOR_INTEL,
2200 .features[FEAT_1_EDX] =
2201 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2202 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2203 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2204 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2205 CPUID_DE | CPUID_FP87,
2206 .features[FEAT_1_ECX] =
2207 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2208 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2209 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2210 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2211 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2212 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2213 .features[FEAT_8000_0001_EDX] =
2214 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2216 .features[FEAT_8000_0001_ECX] =
2217 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2218 .features[FEAT_7_0_EBX] =
2219 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2220 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2221 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2222 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2223 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2224 /* Missing: XSAVES (not supported by some Linux versions,
2225 * including v4.1 to v4.12).
2226 * KVM doesn't yet expose any XSAVES state save component,
2227 * and the only one defined in Skylake (processor tracing)
2228 * probably will block migration anyway.
2230 .features[FEAT_XSAVE] =
2231 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2232 CPUID_XSAVE_XGETBV1,
2233 .features[FEAT_6_EAX] =
2235 .xlevel = 0x80000008,
2236 .model_id = "Intel Core Processor (Skylake)",
2239 .name = "Skylake-Client-IBRS",
2241 .vendor = CPUID_VENDOR_INTEL,
2245 .features[FEAT_1_EDX] =
2246 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2247 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2248 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2249 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2250 CPUID_DE | CPUID_FP87,
2251 .features[FEAT_1_ECX] =
2252 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2253 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2254 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2255 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2256 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2257 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2258 .features[FEAT_8000_0001_EDX] =
2259 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2261 .features[FEAT_8000_0001_ECX] =
2262 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2263 .features[FEAT_7_0_EDX] =
2264 CPUID_7_0_EDX_SPEC_CTRL,
2265 .features[FEAT_7_0_EBX] =
2266 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2267 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2268 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2269 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2270 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2271 /* Missing: XSAVES (not supported by some Linux versions,
2272 * including v4.1 to v4.12).
2273 * KVM doesn't yet expose any XSAVES state save component,
2274 * and the only one defined in Skylake (processor tracing)
2275 * probably will block migration anyway.
2277 .features[FEAT_XSAVE] =
2278 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2279 CPUID_XSAVE_XGETBV1,
2280 .features[FEAT_6_EAX] =
2282 .xlevel = 0x80000008,
2283 .model_id = "Intel Core Processor (Skylake, IBRS)",
2286 .name = "Skylake-Server",
2288 .vendor = CPUID_VENDOR_INTEL,
2292 .features[FEAT_1_EDX] =
2293 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2294 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2295 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2296 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2297 CPUID_DE | CPUID_FP87,
2298 .features[FEAT_1_ECX] =
2299 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2300 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2301 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2302 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2303 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2304 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2305 .features[FEAT_8000_0001_EDX] =
2306 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2307 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2308 .features[FEAT_8000_0001_ECX] =
2309 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2310 .features[FEAT_7_0_EBX] =
2311 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2312 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2313 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2314 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2315 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2316 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2317 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2318 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2319 /* Missing: XSAVES (not supported by some Linux versions,
2320 * including v4.1 to v4.12).
2321 * KVM doesn't yet expose any XSAVES state save component,
2322 * and the only one defined in Skylake (processor tracing)
2323 * probably will block migration anyway.
2325 .features[FEAT_XSAVE] =
2326 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2327 CPUID_XSAVE_XGETBV1,
2328 .features[FEAT_6_EAX] =
2330 .xlevel = 0x80000008,
2331 .model_id = "Intel Xeon Processor (Skylake)",
2334 .name = "Skylake-Server-IBRS",
2336 .vendor = CPUID_VENDOR_INTEL,
2340 .features[FEAT_1_EDX] =
2341 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2342 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2343 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2344 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2345 CPUID_DE | CPUID_FP87,
2346 .features[FEAT_1_ECX] =
2347 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2348 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2349 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2350 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2351 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2352 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2353 .features[FEAT_8000_0001_EDX] =
2354 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2355 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2356 .features[FEAT_8000_0001_ECX] =
2357 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2358 .features[FEAT_7_0_EDX] =
2359 CPUID_7_0_EDX_SPEC_CTRL,
2360 .features[FEAT_7_0_EBX] =
2361 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2362 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2363 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2364 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2365 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2366 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2367 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2368 CPUID_7_0_EBX_AVX512VL,
2369 /* Missing: XSAVES (not supported by some Linux versions,
2370 * including v4.1 to v4.12).
2371 * KVM doesn't yet expose any XSAVES state save component,
2372 * and the only one defined in Skylake (processor tracing)
2373 * probably will block migration anyway.
2375 .features[FEAT_XSAVE] =
2376 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2377 CPUID_XSAVE_XGETBV1,
2378 .features[FEAT_6_EAX] =
2380 .xlevel = 0x80000008,
2381 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2384 .name = "KnightsMill",
2386 .vendor = CPUID_VENDOR_INTEL,
2390 .features[FEAT_1_EDX] =
2391 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2392 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2393 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2394 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2395 CPUID_PSE | CPUID_DE | CPUID_FP87,
2396 .features[FEAT_1_ECX] =
2397 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2398 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2399 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2400 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2401 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2402 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2403 .features[FEAT_8000_0001_EDX] =
2404 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2405 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2406 .features[FEAT_8000_0001_ECX] =
2407 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2408 .features[FEAT_7_0_EBX] =
2409 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2410 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2411 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2412 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2413 CPUID_7_0_EBX_AVX512ER,
2414 .features[FEAT_7_0_ECX] =
2415 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2416 .features[FEAT_7_0_EDX] =
2417 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2418 .features[FEAT_XSAVE] =
2419 CPUID_XSAVE_XSAVEOPT,
2420 .features[FEAT_6_EAX] =
2422 .xlevel = 0x80000008,
2423 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2426 .name = "Opteron_G1",
2428 .vendor = CPUID_VENDOR_AMD,
2432 .features[FEAT_1_EDX] =
2433 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2434 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2435 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2436 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2437 CPUID_DE | CPUID_FP87,
2438 .features[FEAT_1_ECX] =
2440 .features[FEAT_8000_0001_EDX] =
2441 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2442 .xlevel = 0x80000008,
2443 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2446 .name = "Opteron_G2",
2448 .vendor = CPUID_VENDOR_AMD,
2452 .features[FEAT_1_EDX] =
2453 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2454 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2455 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2456 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2457 CPUID_DE | CPUID_FP87,
2458 .features[FEAT_1_ECX] =
2459 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2460 /* Missing: CPUID_EXT2_RDTSCP */
2461 .features[FEAT_8000_0001_EDX] =
2462 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2463 .features[FEAT_8000_0001_ECX] =
2464 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2465 .xlevel = 0x80000008,
2466 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2469 .name = "Opteron_G3",
2471 .vendor = CPUID_VENDOR_AMD,
2475 .features[FEAT_1_EDX] =
2476 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2477 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2478 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2479 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2480 CPUID_DE | CPUID_FP87,
2481 .features[FEAT_1_ECX] =
2482 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2484 /* Missing: CPUID_EXT2_RDTSCP */
2485 .features[FEAT_8000_0001_EDX] =
2486 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2487 .features[FEAT_8000_0001_ECX] =
2488 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2489 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2490 .xlevel = 0x80000008,
2491 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2494 .name = "Opteron_G4",
2496 .vendor = CPUID_VENDOR_AMD,
2500 .features[FEAT_1_EDX] =
2501 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2502 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2503 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2504 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2505 CPUID_DE | CPUID_FP87,
2506 .features[FEAT_1_ECX] =
2507 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2508 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2509 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2511 /* Missing: CPUID_EXT2_RDTSCP */
2512 .features[FEAT_8000_0001_EDX] =
2513 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2515 .features[FEAT_8000_0001_ECX] =
2516 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2517 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2518 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2521 .xlevel = 0x8000001A,
2522 .model_id = "AMD Opteron 62xx class CPU",
2525 .name = "Opteron_G5",
2527 .vendor = CPUID_VENDOR_AMD,
2531 .features[FEAT_1_EDX] =
2532 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2533 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2534 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2535 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2536 CPUID_DE | CPUID_FP87,
2537 .features[FEAT_1_ECX] =
2538 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2539 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2540 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2541 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2542 /* Missing: CPUID_EXT2_RDTSCP */
2543 .features[FEAT_8000_0001_EDX] =
2544 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2546 .features[FEAT_8000_0001_ECX] =
2547 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2548 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2549 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2552 .xlevel = 0x8000001A,
2553 .model_id = "AMD Opteron 63xx class CPU",
2558 .vendor = CPUID_VENDOR_AMD,
2562 .features[FEAT_1_EDX] =
2563 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2564 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2565 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2566 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2567 CPUID_VME | CPUID_FP87,
2568 .features[FEAT_1_ECX] =
2569 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2570 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2571 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2572 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2573 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2574 .features[FEAT_8000_0001_EDX] =
2575 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2576 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2578 .features[FEAT_8000_0001_ECX] =
2579 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2580 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2581 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2583 .features[FEAT_7_0_EBX] =
2584 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2585 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2586 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2587 CPUID_7_0_EBX_SHA_NI,
2588 /* Missing: XSAVES (not supported by some Linux versions,
2589 * including v4.1 to v4.12).
2590 * KVM doesn't yet expose any XSAVES state save component.
2592 .features[FEAT_XSAVE] =
2593 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2594 CPUID_XSAVE_XGETBV1,
2595 .features[FEAT_6_EAX] =
2597 .xlevel = 0x8000001E,
2598 .model_id = "AMD EPYC Processor",
2599 .cache_info = &epyc_cache_info,
2602 .name = "EPYC-IBPB",
2604 .vendor = CPUID_VENDOR_AMD,
2608 .features[FEAT_1_EDX] =
2609 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2610 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2611 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2612 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2613 CPUID_VME | CPUID_FP87,
2614 .features[FEAT_1_ECX] =
2615 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2616 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2617 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2618 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2619 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2620 .features[FEAT_8000_0001_EDX] =
2621 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2622 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2624 .features[FEAT_8000_0001_ECX] =
2625 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2626 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2627 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2629 .features[FEAT_8000_0008_EBX] =
2630 CPUID_8000_0008_EBX_IBPB,
2631 .features[FEAT_7_0_EBX] =
2632 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2633 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2634 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2635 CPUID_7_0_EBX_SHA_NI,
2636 /* Missing: XSAVES (not supported by some Linux versions,
2637 * including v4.1 to v4.12).
2638 * KVM doesn't yet expose any XSAVES state save component.
2640 .features[FEAT_XSAVE] =
2641 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2642 CPUID_XSAVE_XGETBV1,
2643 .features[FEAT_6_EAX] =
2645 .xlevel = 0x8000001E,
2646 .model_id = "AMD EPYC Processor (with IBPB)",
2647 .cache_info = &epyc_cache_info,
/* A (property name, value) pair used in accelerator default tables below;
 * a NULL .prop entry terminates a table. */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
2655 /* KVM-specific features that are automatically added/removed
2656 * from all CPU models when KVM is enabled.
2658 static PropValue kvm_default_props[] = {
2659 { "kvmclock", "on" },
2660 { "kvm-nopiodelay", "on" },
2661 { "kvm-asyncpf", "on" },
2662 { "kvm-steal-time", "on" },
2663 { "kvm-pv-eoi", "on" },
2664 { "kvmclock-stable-bit", "on" },
2667 { "monitor", "off" },
2672 /* TCG-specific defaults that override all CPU models when using TCG
2674 static PropValue tcg_default_props[] = {
2680 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2683 for (pv = kvm_default_props; pv->prop; pv++) {
2684 if (!strcmp(pv->prop, prop)) {
2690 /* It is valid to call this function only for properties that
2691 * are already present in the kvm_default_props table.
2696 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2697 bool migratable_only);
2699 static bool lmce_supported(void)
2701 uint64_t mce_cap = 0;
2704 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2709 return !!(mce_cap & MCG_LMCE_P);
#define CPUID_MODEL_ID_SZ 48

/**
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string
 * automatically.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    /* CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the
     * 48-byte brand string in EAX..EDX. */
    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
2738 static Property max_x86_cpu_properties[] = {
2739 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2740 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2741 DEFINE_PROP_END_OF_LIST()
2744 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2746 DeviceClass *dc = DEVICE_CLASS(oc);
2747 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2751 xcc->model_description =
2752 "Enables all features supported by the accelerator in the current host";
2754 dc->props = max_x86_cpu_properties;
2757 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2759 static void max_x86_cpu_initfn(Object *obj)
2761 X86CPU *cpu = X86_CPU(obj);
2762 CPUX86State *env = &cpu->env;
2763 KVMState *s = kvm_state;
2765 /* We can't fill the features array here because we don't know yet if
2766 * "migratable" is true or false.
2768 cpu->max_features = true;
2770 if (accel_uses_host_cpuid()) {
2771 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2772 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2773 int family, model, stepping;
2774 X86CPUDefinition host_cpudef = { };
2775 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2777 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2778 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2780 host_vendor_fms(vendor, &family, &model, &stepping);
2782 cpu_x86_fill_model_id(model_id);
2784 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2785 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2786 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2787 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2789 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2792 if (kvm_enabled()) {
2793 env->cpuid_min_level =
2794 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2795 env->cpuid_min_xlevel =
2796 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2797 env->cpuid_min_xlevel2 =
2798 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2800 env->cpuid_min_level =
2801 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2802 env->cpuid_min_xlevel =
2803 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2804 env->cpuid_min_xlevel2 =
2805 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2808 if (lmce_supported()) {
2809 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2812 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2813 "vendor", &error_abort);
2814 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2815 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2816 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2817 object_property_set_str(OBJECT(cpu),
2818 "QEMU TCG CPU version " QEMU_HW_VERSION,
2819 "model-id", &error_abort);
2822 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
2825 static const TypeInfo max_x86_cpu_type_info = {
2826 .name = X86_CPU_TYPE_NAME("max"),
2827 .parent = TYPE_X86_CPU,
2828 .instance_init = max_x86_cpu_initfn,
2829 .class_init = max_x86_cpu_class_init,
2832 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2833 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2835 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2837 xcc->host_cpuid_required = true;
2840 #if defined(CONFIG_KVM)
2841 xcc->model_description =
2842 "KVM processor with all supported host features ";
2843 #elif defined(CONFIG_HVF)
2844 xcc->model_description =
2845 "HVF processor with all supported host features ";
2849 static const TypeInfo host_x86_cpu_type_info = {
2850 .name = X86_CPU_TYPE_NAME("host"),
2851 .parent = X86_CPU_TYPE_NAME("max"),
2852 .class_init = host_x86_cpu_class_init,
2857 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2859 FeatureWordInfo *f = &feature_word_info[w];
2862 for (i = 0; i < 32; ++i) {
2863 if ((1UL << i) & mask) {
2864 const char *reg = get_register_name_32(f->cpuid_reg);
2866 warn_report("%s doesn't support requested feature: "
2867 "CPUID.%02XH:%s%s%s [bit %d]",
2868 accel_uses_host_cpuid() ? "host" : "TCG",
2870 f->feat_names[i] ? "." : "",
2871 f->feat_names[i] ? f->feat_names[i] : "", i);
2876 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2877 const char *name, void *opaque,
2880 X86CPU *cpu = X86_CPU(obj);
2881 CPUX86State *env = &cpu->env;
2884 value = (env->cpuid_version >> 8) & 0xf;
2886 value += (env->cpuid_version >> 20) & 0xff;
2888 visit_type_int(v, name, &value, errp);
2891 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2892 const char *name, void *opaque,
2895 X86CPU *cpu = X86_CPU(obj);
2896 CPUX86State *env = &cpu->env;
2897 const int64_t min = 0;
2898 const int64_t max = 0xff + 0xf;
2899 Error *local_err = NULL;
2902 visit_type_int(v, name, &value, &local_err);
2904 error_propagate(errp, local_err);
2907 if (value < min || value > max) {
2908 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2909 name ? name : "null", value, min, max);
2913 env->cpuid_version &= ~0xff00f00;
2915 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2917 env->cpuid_version |= value << 8;
2921 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2922 const char *name, void *opaque,
2925 X86CPU *cpu = X86_CPU(obj);
2926 CPUX86State *env = &cpu->env;
2929 value = (env->cpuid_version >> 4) & 0xf;
2930 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2931 visit_type_int(v, name, &value, errp);
2934 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2935 const char *name, void *opaque,
2938 X86CPU *cpu = X86_CPU(obj);
2939 CPUX86State *env = &cpu->env;
2940 const int64_t min = 0;
2941 const int64_t max = 0xff;
2942 Error *local_err = NULL;
2945 visit_type_int(v, name, &value, &local_err);
2947 error_propagate(errp, local_err);
2950 if (value < min || value > max) {
2951 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2952 name ? name : "null", value, min, max);
2956 env->cpuid_version &= ~0xf00f0;
2957 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
2960 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2961 const char *name, void *opaque,
2964 X86CPU *cpu = X86_CPU(obj);
2965 CPUX86State *env = &cpu->env;
2968 value = env->cpuid_version & 0xf;
2969 visit_type_int(v, name, &value, errp);
2972 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2973 const char *name, void *opaque,
2976 X86CPU *cpu = X86_CPU(obj);
2977 CPUX86State *env = &cpu->env;
2978 const int64_t min = 0;
2979 const int64_t max = 0xf;
2980 Error *local_err = NULL;
2983 visit_type_int(v, name, &value, &local_err);
2985 error_propagate(errp, local_err);
2988 if (value < min || value > max) {
2989 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2990 name ? name : "null", value, min, max);
2994 env->cpuid_version &= ~0xf;
2995 env->cpuid_version |= value & 0xf;
2998 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
3000 X86CPU *cpu = X86_CPU(obj);
3001 CPUX86State *env = &cpu->env;
3004 value = g_malloc(CPUID_VENDOR_SZ + 1);
3005 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
3006 env->cpuid_vendor3);
3010 static void x86_cpuid_set_vendor(Object *obj, const char *value,
3013 X86CPU *cpu = X86_CPU(obj);
3014 CPUX86State *env = &cpu->env;
3017 if (strlen(value) != CPUID_VENDOR_SZ) {
3018 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
3022 env->cpuid_vendor1 = 0;
3023 env->cpuid_vendor2 = 0;
3024 env->cpuid_vendor3 = 0;
3025 for (i = 0; i < 4; i++) {
3026 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3027 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3028 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3032 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3034 X86CPU *cpu = X86_CPU(obj);
3035 CPUX86State *env = &cpu->env;
3039 value = g_malloc(48 + 1);
3040 for (i = 0; i < 48; i++) {
3041 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3047 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3050 X86CPU *cpu = X86_CPU(obj);
3051 CPUX86State *env = &cpu->env;
3054 if (model_id == NULL) {
3057 len = strlen(model_id);
3058 memset(env->cpuid_model, 0, 48);
3059 for (i = 0; i < 48; i++) {
3063 c = (uint8_t)model_id[i];
3065 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3069 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3070 void *opaque, Error **errp)
3072 X86CPU *cpu = X86_CPU(obj);
3075 value = cpu->env.tsc_khz * 1000;
3076 visit_type_int(v, name, &value, errp);
3079 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3080 void *opaque, Error **errp)
3082 X86CPU *cpu = X86_CPU(obj);
3083 const int64_t min = 0;
3084 const int64_t max = INT64_MAX;
3085 Error *local_err = NULL;
3088 visit_type_int(v, name, &value, &local_err);
3090 error_propagate(errp, local_err);
3093 if (value < min || value > max) {
3094 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3095 name ? name : "null", value, min, max);
3099 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
3102 /* Generic getter for "feature-words" and "filtered-features" properties */
3103 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
3104 const char *name, void *opaque,
3107 uint32_t *array = (uint32_t *)opaque;
3109 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
3110 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
3111 X86CPUFeatureWordInfoList *list = NULL;
3113 for (w = 0; w < FEATURE_WORDS; w++) {
3114 FeatureWordInfo *wi = &feature_word_info[w];
3115 X86CPUFeatureWordInfo *qwi = &word_infos[w];
3116 qwi->cpuid_input_eax = wi->cpuid_eax;
3117 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
3118 qwi->cpuid_input_ecx = wi->cpuid_ecx;
3119 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
3120 qwi->features = array[w];
3122 /* List will be in reverse order, but order shouldn't matter */
3123 list_entries[w].next = list;
3124 list_entries[w].value = &word_infos[w];
3125 list = &list_entries[w];
3128 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
3131 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3132 void *opaque, Error **errp)
3134 X86CPU *cpu = X86_CPU(obj);
3135 int64_t value = cpu->hyperv_spinlock_attempts;
3137 visit_type_int(v, name, &value, errp);
3140 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3141 void *opaque, Error **errp)
3143 const int64_t min = 0xFFF;
3144 const int64_t max = UINT_MAX;
3145 X86CPU *cpu = X86_CPU(obj);
3149 visit_type_int(v, name, &value, &err);
3151 error_propagate(errp, err);
3155 if (value < min || value > max) {
3156 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
3157 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
3158 object_get_typename(obj), name ? name : "null",
3162 cpu->hyperv_spinlock_attempts = value;
3165 static const PropertyInfo qdev_prop_spinlocks = {
3167 .get = x86_get_hv_spinlocks,
3168 .set = x86_set_hv_spinlocks,
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}
3181 /* Return the feature property name for a feature flag bit */
3182 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
3184 /* XSAVE components are automatically enabled by other features,
3185 * so return the original feature name instead
3187 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
3188 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
3190 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
3191 x86_ext_save_areas[comp].bits) {
3192 w = x86_ext_save_areas[comp].feature;
3193 bitnr = ctz32(x86_ext_save_areas[comp].bits);
3198 assert(w < FEATURE_WORDS);
3199 return feature_word_info[w].feat_names[bitnr];
3202 /* Compatibily hack to maintain legacy +-feat semantic,
3203 * where +-feat overwrites any feature set by
3204 * feat=on|feat even if the later is parsed after +-feat
3205 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
3207 static GList *plus_features, *minus_features;
3209 static gint compare_string(gconstpointer a, gconstpointer b)
3211 return g_strcmp0(a, b);
3214 /* Parse "+feature,-feature,feature=foo" CPU feature string
/* Translates a -cpu feature string into global properties registered for
 * @typename.  Runs at most once per process (guarded by
 * cpu_globals_initialized).  NOTE(review): uses strtok(), which mutates
 * @features and is not reentrant — acceptable only because this runs once
 * during startup. */
3216 static void x86_cpu_parse_featurestr(const char *typename, char *features,
3219 char *featurestr; /* Single 'key=value" string being parsed */
3220 static bool cpu_globals_initialized;
3221 bool ambiguous = false;
3223 if (cpu_globals_initialized) {
3226 cpu_globals_initialized = true;
3232 for (featurestr = strtok(features, ",");
3234 featurestr = strtok(NULL, ",")) {
3236 const char *val = NULL;
3239 GlobalProperty *prop;
3241 /* Compatibility syntax: */
/* Legacy "+feat"/"-feat" are collected in lists and applied later, so they
 * override "feat=on|off" regardless of ordering (see comment above). */
3242 if (featurestr[0] == '+') {
3243 plus_features = g_list_append(plus_features,
3244 g_strdup(featurestr + 1));
3246 } else if (featurestr[0] == '-') {
3247 minus_features = g_list_append(minus_features,
3248 g_strdup(featurestr + 1));
3252 eq = strchr(featurestr, '=');
3260 feat2prop(featurestr);
/* Warn when the same feature appears both as +/-feat and feat=val. */
3263 if (g_list_find_custom(plus_features, name, compare_string)) {
3264 warn_report("Ambiguous CPU model string. "
3265 "Don't mix both \"+%s\" and \"%s=%s\"",
3269 if (g_list_find_custom(minus_features, name, compare_string)) {
3270 warn_report("Ambiguous CPU model string. "
3271 "Don't mix both \"-%s\" and \"%s=%s\"",
/* "tsc-freq" accepts suffixed values (e.g. "2.5G"); convert to a plain
 * integer and forward to the "tsc-frequency" property. */
3277 if (!strcmp(name, "tsc-freq")) {
3281 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
3282 if (ret < 0 || tsc_freq > INT64_MAX) {
3283 error_setg(errp, "bad numerical value %s", val);
3286 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
3288 name = "tsc-frequency";
/* Register as a global property so it applies to every CPU of @typename.
 * NOTE(review): prop->driver is assigned the caller's @typename pointer
 * without g_strdup() — the caller presumably guarantees its lifetime. */
3291 prop = g_new0(typeof(*prop), 1);
3292 prop->driver = typename;
3293 prop->property = g_strdup(name);
3294 prop->value = g_strdup(val);
3295 prop->errp = &error_fatal;
3296 qdev_prop_register_global(prop);
3300 warn_report("Compatibility of ambiguous CPU model "
3301 "strings won't be kept on future QEMU versions");
3305 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3306 static int x86_cpu_filter_features(X86CPU *cpu);
3308 /* Check for missing features that may prevent the CPU class from
3309 * running using the current machine and accelerator.
/* Fills @missing_feats with the names of features the host/accelerator
 * cannot provide for this CPU class.  Instantiates a throwaway CPU object
 * to run feature expansion and filtering; the object is unref'd at the end. */
3311 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3312 strList **missing_feats)
3317 strList **next = missing_feats;
/* host-passthrough models are simply reported as needing "kvm". */
3319 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3320 strList *new = g_new0(strList, 1);
3321 new->value = g_strdup("kvm");
3322 *missing_feats = new;
3326 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3328 x86_cpu_expand_features(xc, &err);
3330 /* Errors at x86_cpu_expand_features should never happen,
3331 * but in case it does, just report the model as not
3332 * runnable at all using the "type" property.
3334 strList *new = g_new0(strList, 1);
3335 new->value = g_strdup("type");
3340 x86_cpu_filter_features(xc);
/* Translate each filtered-out feature bit into its property name. */
3342 for (w = 0; w < FEATURE_WORDS; w++) {
3343 uint32_t filtered = xc->filtered_features[w];
3345 for (i = 0; i < 32; i++) {
3346 if (filtered & (1UL << i)) {
3347 strList *new = g_new0(strList, 1);
3348 new->value = g_strdup(x86_cpu_feature_name(w, i));
3355 object_unref(OBJECT(xc));
3358 /* Print all cpuid feature names in featureset
/* Word-wraps the feature names at roughly 75 columns.
 * NOTE(review): with the visible format string, len==0 and len!=0 both print
 * a single-space separator — the distinction may carry different whitespace
 * in the full source; confirm there. */
3360 static void listflags(FILE *f, fprintf_function print, GList *features)
3365 for (tmp = features; tmp; tmp = tmp->next) {
3366 const char *name = tmp->data;
3367 if ((len + strlen(name) + 1) >= 75) {
3371 print(f, "%s%s", len == 0 ? " " : " ", name);
3372 len += strlen(name) + 1;
3377 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
/* GCompareFunc for sorting CPU classes: primary key is the class's
 * explicit ordering field, secondary key is the model name. */
3378 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3380 ObjectClass *class_a = (ObjectClass *)a;
3381 ObjectClass *class_b = (ObjectClass *)b;
3382 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3383 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3384 char *name_a, *name_b;
3387 if (cc_a->ordering != cc_b->ordering) {
3388 ret = cc_a->ordering - cc_b->ordering;
3390 name_a = x86_cpu_class_get_model_name(cc_a);
3391 name_b = x86_cpu_class_get_model_name(cc_b);
3392 ret = strcmp(name_a, name_b);
/* Return all X86CPU classes sorted with x86_cpu_list_compare().
 * Caller owns the returned list (g_slist_free). */
3399 static GSList *get_sorted_cpu_model_list(void)
3401 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3402 list = g_slist_sort(list, x86_cpu_list_compare);
/* g_slist_foreach() callback: print one "x86 <name> <description>" row
 * for the -cpu help listing; @user_data is the CPUListState. */
3406 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3408 ObjectClass *oc = data;
3409 X86CPUClass *cc = X86_CPU_CLASS(oc);
3410 CPUListState *s = user_data;
3411 char *name = x86_cpu_class_get_model_name(cc);
3412 const char *desc = cc->model_description;
/* Fall back to the CPUID model-id string when no description is set. */
3413 if (!desc && cc->cpu_def) {
3414 desc = cc->cpu_def->model_id;
3417 (*s->cpu_fprintf)(s->file, "x86 %-20s %-48s\n",
3422 /* list available CPU models and flags */
/* Implements "-cpu help": prints the sorted model list followed by every
 * recognized CPUID flag name, sorted and word-wrapped via listflags(). */
3423 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3428 .cpu_fprintf = cpu_fprintf,
3431 GList *names = NULL;
3433 (*cpu_fprintf)(f, "Available CPUs:\n");
3434 list = get_sorted_cpu_model_list();
3435 g_slist_foreach(list, x86_cpu_list_entry, &s);
/* Collect every named feature bit across all feature words. */
3439 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3440 FeatureWordInfo *fw = &feature_word_info[i];
3441 for (j = 0; j < 32; j++) {
3442 if (fw->feat_names[j]) {
3443 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3448 names = g_list_sort(names, (GCompareFunc)strcmp);
3450 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
3451 listflags(f, cpu_fprintf, names);
3452 (*cpu_fprintf)(f, "\n");
/* g_slist_foreach() callback for query-cpu-definitions: build one
 * CpuDefinitionInfo for this CPU class and prepend it to *@user_data. */
3456 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3458 ObjectClass *oc = data;
3459 X86CPUClass *cc = X86_CPU_CLASS(oc);
3460 CpuDefinitionInfoList **cpu_list = user_data;
3461 CpuDefinitionInfoList *entry;
3462 CpuDefinitionInfo *info;
3464 info = g_malloc0(sizeof(*info));
3465 info->name = x86_cpu_class_get_model_name(cc);
/* unavailable_features reports what the current host/accel can't provide. */
3466 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3467 info->has_unavailable_features = true;
3468 info->q_typename = g_strdup(object_class_get_name(oc));
3469 info->migration_safe = cc->migration_safe;
3470 info->has_migration_safe = true;
3471 info->q_static = cc->static_model;
/* Prepend to the result list (list order is reversed vs. iteration). */
3473 entry = g_malloc0(sizeof(*entry));
3474 entry->value = info;
3475 entry->next = *cpu_list;
/* QMP query-cpu-definitions backend: enumerate all x86 CPU models. */
3479 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
3481 CpuDefinitionInfoList *cpu_list = NULL;
3482 GSList *list = get_sorted_cpu_model_list();
3483 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
/* Return the feature bits of word @w that the current accelerator can
 * actually provide; optionally restricted to migration-safe bits. */
3488 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3489 bool migratable_only)
3491 FeatureWordInfo *wi = &feature_word_info[w];
/* Ask the accelerator in use; each branch queries a different backend. */
3494 if (kvm_enabled()) {
3495 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
3498 } else if (hvf_enabled()) {
3499 r = hvf_get_supported_cpuid(wi->cpuid_eax,
3502 } else if (tcg_enabled()) {
3503 r = wi->tcg_features;
3507 if (migratable_only) {
3508 r &= x86_cpu_get_migratable_flags(w);
/* Warn about every feature bit that was filtered out of each feature word
 * because the host/accelerator cannot provide it. */
3513 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3517 for (w = 0; w < FEATURE_WORDS; w++) {
3518 report_unavailable_features(w, cpu->filtered_features[w]);
/* Apply a NULL-terminated table of property name/value pairs to @cpu
 * (used for per-accelerator default properties). */
3522 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3525 for (pv = props; pv->prop; pv++) {
3529 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3534 /* Load data from X86CPUDefinition into a X86CPU object
/* Called at instance_init time: copies the static model definition into the
 * CPU object via QOM properties so that later expansion/filtering can
 * override them. */
3536 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
3538 CPUX86State *env = &cpu->env;
3540 char host_vendor[CPUID_VENDOR_SZ + 1];
3543 /*NOTE: any property set by this function should be returned by
3544 * x86_cpu_static_props(), so static expansion of
3545 * query-cpu-model-expansion is always complete.
3548 /* CPU models only set _minimum_ values for level/xlevel: */
3549 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
3550 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
3552 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
3553 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
3554 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
3555 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
/* Feature words are copied directly rather than through properties. */
3556 for (w = 0; w < FEATURE_WORDS; w++) {
3557 env->features[w] = def->features[w];
3560 /* legacy-cache defaults to 'off' if CPU model provides cache info */
3561 cpu->legacy_cache = !def->cache_info;
3563 /* Special cases not set in the X86CPUDefinition structs: */
3564 /* TODO: in-kernel irqchip for hvf */
3565 if (kvm_enabled()) {
/* Userspace APIC can't keep up with x2apic; disable it by default. */
3566 if (!kvm_irqchip_in_kernel()) {
3567 x86_cpu_change_kvm_default("x2apic", "off");
3570 x86_cpu_apply_props(cpu, kvm_default_props);
3571 } else if (tcg_enabled()) {
3572 x86_cpu_apply_props(cpu, tcg_default_props);
/* Always advertise that we are running under a hypervisor. */
3575 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
3577 /* sysenter isn't supported in compatibility mode on AMD,
3578 * syscall isn't supported in compatibility mode on Intel.
3579 * Normally we advertise the actual CPU vendor, but you can
3580 * override this using the 'vendor' property if you want to use
3581 * KVM's sysenter/syscall emulation in compatibility mode and
3582 * when doing cross vendor migration
3584 vendor = def->vendor;
3585 if (accel_uses_host_cpuid()) {
3586 uint32_t ebx = 0, ecx = 0, edx = 0;
3587 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
3588 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
3589 vendor = host_vendor;
3592 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
3596 /* Return a QDict containing keys for all properties that can be included
3597 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3598 * must be included in the dictionary.
/* The dict is built lazily; keys map to NULL and only the key set matters.
 * NOTE(review): the caching/return logic is not fully visible in this chunk. */
3600 static QDict *x86_cpu_static_props(void)
3604 static const char *props[] = {
3622 for (i = 0; props[i]; i++) {
3623 qdict_put_null(d, props[i]);
/* Every named feature flag is also a static property. */
3626 for (w = 0; w < FEATURE_WORDS; w++) {
3627 FeatureWordInfo *fi = &feature_word_info[w];
3629 for (bit = 0; bit < 32; bit++) {
3630 if (!fi->feat_names[bit]) {
3633 qdict_put_null(d, fi->feat_names[bit]);
3640 /* Add an entry to @props dict, with the value for property. */
3641 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3643 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
/* qdict_put_obj() takes ownership of @value. */
3646 qdict_put_obj(props, prop, value);
3649 /* Convert CPU model data from X86CPU object to a property dictionary
3650 * that can recreate exactly the same CPU model.
/* Static expansion: emit only the properties listed by x86_cpu_static_props(). */
3652 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3654 QDict *sprops = x86_cpu_static_props();
3655 const QDictEntry *e;
3657 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3658 const char *prop = qdict_entry_key(e);
3659 x86_cpu_expand_prop(cpu, props, prop);
3663 /* Convert CPU model data from X86CPU object to a property dictionary
3664 * that can recreate exactly the same CPU model, including every
3665 * writeable QOM property.
3667 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3669 ObjectPropertyIterator iter;
3670 ObjectProperty *prop;
3672 object_property_iter_init(&iter, OBJECT(cpu));
3673 while ((prop = object_property_iter_next(&iter))) {
3674 /* skip read-only or write-only properties */
3675 if (!prop->get || !prop->set) {
3679 /* "hotplugged" is the only property that is configurable
3680 * on the command-line but will be set differently on CPUs
3681 * created using "-cpu ... -smp ..." and by CPUs created
3682 * on the fly by x86_cpu_from_model() for querying. Skip it.
3684 if (!strcmp(prop->name, "hotplugged")) {
3687 x86_cpu_expand_prop(cpu, props, prop->name);
/* Set every key/value pair from @props as a QOM property on @obj,
 * propagating the first error to @errp.
 * NOTE(review): the early-break on error is not visible in this chunk. */
3691 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3693 const QDictEntry *prop;
3696 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3697 object_property_set_qobject(obj, qdict_entry_value(prop),
3698 qdict_entry_key(prop), &err);
3704 error_propagate(errp, err);
3707 /* Create X86CPU object according to model+props specification */
/* Returns a newly-created, feature-expanded (but NOT realized) X86CPU, or
 * NULL on error.  Caller owns the reference. */
3708 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3714 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3716 error_setg(&err, "CPU model '%s' not found", model);
3720 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3722 object_apply_props(OBJECT(xc), props, &err);
3728 x86_cpu_expand_features(xc, &err);
/* Error path: drop the partially-built object and report the error. */
3735 error_propagate(errp, err);
3736 object_unref(OBJECT(xc));
/* QMP query-cpu-model-expansion backend.
 * "static" expansion re-expresses the model on top of the "base" model;
 * "full" expansion keeps the original name and adds every writable
 * property on top. */
3742 CpuModelExpansionInfo *
3743 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3744 CpuModelInfo *model,
3749 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3750 QDict *props = NULL;
3751 const char *base_name;
3753 xc = x86_cpu_from_model(model->name,
3755 qobject_to(QDict, model->props) :
3761 props = qdict_new();
3764 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3765 /* Static expansion will be based on "base" only */
3767 x86_cpu_to_dict(xc, props);
3769 case CPU_MODEL_EXPANSION_TYPE_FULL:
3770 /* As we don't return every single property, full expansion needs
3771 * to keep the original model name+props, and add extra
3772 * properties on top of that.
3774 base_name = model->name;
3775 x86_cpu_to_dict_full(xc, props);
/* NOTE(review): "Unsupportted" is a typo in this user-visible error
 * message ("Unsupported"); it is a runtime string, so fixing it changes
 * QMP output — fix upstream, not in a comment-only pass. */
3778 error_setg(&err, "Unsupportted expansion type");
3783 props = qdict_new();
3785 x86_cpu_to_dict(xc, props);
3787 ret->model = g_new0(CpuModelInfo, 1);
3788 ret->model->name = g_strdup(base_name);
3789 ret->model->props = QOBJECT(props);
3790 ret->model->has_props = true;
/* Error path: release the temp CPU and the half-built result. */
3793 object_unref(OBJECT(xc));
3795 error_propagate(errp, err);
3796 qapi_free_CpuModelExpansionInfo(ret);
/* Architecture name reported to the GDB remote protocol; the two returns
 * are the #ifdef TARGET_X86_64 / #else branches. */
3802 static gchar *x86_gdb_arch_name(CPUState *cs)
3804 #ifdef TARGET_X86_64
3805 return g_strdup("i386:x86-64");
3807 return g_strdup("i386");
/* class_init for model-specific CPU types: @data is the X86CPUDefinition
 * registered by x86_register_cpudef_type(). */
3811 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3813 X86CPUDefinition *cpudef = data;
3814 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3816 xcc->cpu_def = cpudef;
3817 xcc->migration_safe = true;
/* Register one named CPU model definition as a QOM type. */
3820 static void x86_register_cpudef_type(X86CPUDefinition *def)
3822 char *typename = x86_cpu_type_name(def->name);
3825 .parent = TYPE_X86_CPU,
3826 .class_init = x86_cpu_cpudef_class_init,
3830 /* AMD aliases are handled at runtime based on CPUID vendor, so
3831 * they shouldn't be set on the CPU model table.
3833 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3834 /* catch mistakes instead of silently truncating model_id when too long */
3835 assert(def->model_id && strlen(def->model_id) <= 48);
3842 #if !defined(CONFIG_USER_ONLY)
/* Strip the APIC CPUID bit; used by boards that have no local APIC. */
3844 void cpu_clear_apic_feature(CPUX86State *env)
3846 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3849 #endif /* !CONFIG_USER_ONLY */
/* Core CPUID emulation: fill EAX/EBX/ECX/EDX for leaf @index, subleaf
 * @count, from the configured feature words, cache models and topology.
 * This is one large switch over the leaf number; many case labels are not
 * visible in this chunk of the file. */
3851 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3852 uint32_t *eax, uint32_t *ebx,
3853 uint32_t *ecx, uint32_t *edx)
3855 X86CPU *cpu = x86_env_get_cpu(env);
3856 CPUState *cs = CPU(cpu);
3857 uint32_t pkg_offset;
3859 uint32_t signature[3];
3861 /* Calculate & apply limits for different index ranges */
3862 if (index >= 0xC0000000) {
3863 limit = env->cpuid_xlevel2;
3864 } else if (index >= 0x80000000) {
3865 limit = env->cpuid_xlevel;
3866 } else if (index >= 0x40000000) {
3869 limit = env->cpuid_level;
3872 if (index > limit) {
3873 /* Intel documentation states that invalid EAX input will
3874 * return the same information as EAX=cpuid_level
3875 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3877 index = env->cpuid_level;
/* Leaf 0: max basic leaf + vendor string (EBX/EDX/ECX order). */
3882 *eax = env->cpuid_level;
3883 *ebx = env->cpuid_vendor1;
3884 *edx = env->cpuid_vendor2;
3885 *ecx = env->cpuid_vendor3;
/* Leaf 1: version, APIC id, feature flags. */
3888 *eax = env->cpuid_version;
3889 *ebx = (cpu->apic_id << 24) |
3890 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3891 *ecx = env->features[FEAT_1_ECX];
/* OSXSAVE mirrors CR4.OSXSAVE at read time. */
3892 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3893 *ecx |= CPUID_EXT_OSXSAVE;
3895 *edx = env->features[FEAT_1_EDX];
3896 if (cs->nr_cores * cs->nr_threads > 1) {
3897 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3902 /* cache info: needed for Pentium Pro compatibility */
3903 if (cpu->cache_info_passthrough) {
3904 host_cpuid(index, 0, eax, ebx, ecx, edx);
3907 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3909 if (!cpu->enable_l3_cache) {
3912 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
3914 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
3915 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
3916 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
3919 /* cache info: needed for Core compatibility */
3920 if (cpu->cache_info_passthrough) {
3921 host_cpuid(index, count, eax, ebx, ecx, edx);
3922 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3923 *eax &= ~0xFC000000;
3924 if ((*eax & 31) && cs->nr_cores > 1) {
3925 *eax |= (cs->nr_cores - 1) << 26;
/* Subleaves of leaf 4 enumerate one cache level each. */
3930 case 0: /* L1 dcache info */
3931 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
3933 eax, ebx, ecx, edx);
3935 case 1: /* L1 icache info */
3936 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
3938 eax, ebx, ecx, edx);
3940 case 2: /* L2 cache info */
3941 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
3942 cs->nr_threads, cs->nr_cores,
3943 eax, ebx, ecx, edx);
3945 case 3: /* L3 cache info */
3946 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3947 if (cpu->enable_l3_cache) {
3948 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
3949 (1 << pkg_offset), cs->nr_cores,
3950 eax, ebx, ecx, edx);
3954 default: /* end of info */
3955 *eax = *ebx = *ecx = *edx = 0;
3961 /* MONITOR/MWAIT Leaf */
3962 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
3963 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
3964 *ecx = cpu->mwait.ecx; /* flags */
3965 *edx = cpu->mwait.edx; /* mwait substates */
3968 /* Thermal and Power Leaf */
3969 *eax = env->features[FEAT_6_EAX];
3975 /* Structured Extended Feature Flags Enumeration Leaf */
3977 *eax = 0; /* Maximum ECX value for sub-leaves */
3978 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3979 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
/* OSPKE mirrors CR4.PKE at read time, like OSXSAVE above. */
3980 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3981 *ecx |= CPUID_7_0_ECX_OSPKE;
3983 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3992 /* Direct Cache Access Information Leaf */
3993 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3999 /* Architectural Performance Monitoring Leaf */
/* Only expose real PMU data when the accelerator provides it. */
4000 if (kvm_enabled() && cpu->enable_pmu) {
4001 KVMState *s = cs->kvm_state;
4003 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
4004 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
4005 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
4006 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
4007 } else if (hvf_enabled() && cpu->enable_pmu) {
4008 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
4009 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
4010 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
4011 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
4020 /* Extended Topology Enumeration Leaf */
4021 if (!cpu->enable_cpuid_0xb) {
4022 *eax = *ebx = *ecx = *edx = 0;
4026 *ecx = count & 0xff;
4027 *edx = cpu->apic_id;
/* Subleaf 0 = SMT level, subleaf 1 = core level, others invalid. */
4031 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
4032 *ebx = cs->nr_threads;
4033 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
4036 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
4037 *ebx = cs->nr_cores * cs->nr_threads;
4038 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
4043 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
4046 assert(!(*eax & ~0x1f));
4047 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
4050 /* Processor Extended State */
4055 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
/* Subleaf 0: enabled components + total save-area size. */
4060 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
4061 *eax = env->features[FEAT_XSAVE_COMP_LO];
4062 *edx = env->features[FEAT_XSAVE_COMP_HI];
4064 } else if (count == 1) {
4065 *eax = env->features[FEAT_XSAVE];
4066 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
4067 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
4068 const ExtSaveArea *esa = &x86_ext_save_areas[count];
4076 /* Intel Processor Trace Enumeration */
4081 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
4087 *eax = INTEL_PT_MAX_SUBLEAF;
4088 *ebx = INTEL_PT_MINIMAL_EBX;
4089 *ecx = INTEL_PT_MINIMAL_ECX;
4090 } else if (count == 1) {
4091 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
4092 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
4098 * CPUID code in kvm_arch_init_vcpu() ignores stuff
4099 * set here, but we restrict to TCG none the less.
4101 if (tcg_enabled() && cpu->expose_tcg) {
4102 memcpy(signature, "TCGTCGTCGTCG", 12);
4104 *ebx = signature[0];
4105 *ecx = signature[1];
4106 *edx = signature[2];
/* Leaf 0x80000000: max extended leaf + vendor string again. */
4121 *eax = env->cpuid_xlevel;
4122 *ebx = env->cpuid_vendor1;
4123 *edx = env->cpuid_vendor2;
4124 *ecx = env->cpuid_vendor3;
4127 *eax = env->cpuid_version;
4129 *ecx = env->features[FEAT_8000_0001_ECX];
4130 *edx = env->features[FEAT_8000_0001_EDX];
4132 /* The Linux kernel checks for the CMPLegacy bit and
4133 * discards multiple thread information if it is set.
4134 * So don't set it here for Intel to make Linux guests happy.
4136 if (cs->nr_cores * cs->nr_threads > 1) {
4137 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
4138 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
4139 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
4140 *ecx |= 1 << 1; /* CmpLegacy bit */
/* Leaves 0x80000002..4: 48-byte model-id string, 16 bytes per leaf. */
4147 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
4148 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
4149 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
4150 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
4153 /* cache info (L1 cache) */
4154 if (cpu->cache_info_passthrough) {
4155 host_cpuid(index, 0, eax, ebx, ecx, edx);
4158 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
4159 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
4160 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
4161 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
4162 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
4163 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
4166 /* cache info (L2 cache) */
4167 if (cpu->cache_info_passthrough) {
4168 host_cpuid(index, 0, eax, ebx, ecx, edx);
4171 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
4172 (L2_DTLB_2M_ENTRIES << 16) | \
4173 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
4174 (L2_ITLB_2M_ENTRIES);
4175 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
4176 (L2_DTLB_4K_ENTRIES << 16) | \
4177 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
4178 (L2_ITLB_4K_ENTRIES);
4179 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
4180 cpu->enable_l3_cache ?
4181 env->cache_info_amd.l3_cache : NULL,
4188 *edx = env->features[FEAT_8000_0007_EDX];
4191 /* virtual & phys address size in low 2 bytes. */
4192 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4193 /* 64 bit processor */
4194 *eax = cpu->phys_bits; /* configurable physical bits */
4195 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
4196 *eax |= 0x00003900; /* 57 bits virtual */
4198 *eax |= 0x00003000; /* 48 bits virtual */
4201 *eax = cpu->phys_bits;
4203 *ebx = env->features[FEAT_8000_0008_EBX];
4206 if (cs->nr_cores * cs->nr_threads > 1) {
4207 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
/* Leaf 0x8000000A: SVM revision and features (AMD nested virt). */
4211 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4212 *eax = 0x00000001; /* SVM Revision */
4213 *ebx = 0x00000010; /* nr of ASIDs */
4215 *edx = env->features[FEAT_SVM]; /* optional features */
/* Leaf 0x8000001D: AMD cache topology, one cache level per subleaf. */
4226 case 0: /* L1 dcache info */
4227 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
4228 eax, ebx, ecx, edx);
4230 case 1: /* L1 icache info */
4231 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
4232 eax, ebx, ecx, edx);
4234 case 2: /* L2 cache info */
4235 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
4236 eax, ebx, ecx, edx);
4238 case 3: /* L3 cache info */
4239 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
4240 eax, ebx, ecx, edx);
4242 default: /* end of info */
4243 *eax = *ebx = *ecx = *edx = 0;
4248 assert(cpu->core_id <= 255);
4249 encode_topo_cpuid8000001e(cs, cpu,
4250 eax, ebx, ecx, edx);
/* 0xC0000000 range: Centaur/VIA extended leaves. */
4253 *eax = env->cpuid_xlevel2;
4259 /* Support for VIA CPU's CPUID instruction */
4260 *eax = env->cpuid_version;
4263 *edx = env->features[FEAT_C000_0001_EDX];
4268 /* Reserved for the future, and now filled with zero */
/* AMD SEV leaf: bit 1 = SEV enabled; EBX packs C-bit position and
 * reduced physical address bits. */
4275 *eax = sev_enabled() ? 0x2 : 0;
4276 *ebx = sev_get_cbit_position();
4277 *ebx |= sev_get_reduced_phys_bits() << 6;
4282 /* reserved values: zero */
4291 /* CPUClass::reset() */
/* Reset the vCPU to architectural power-on state: segment caches, control
 * registers, FPU/SSE state, MSRs, MTRRs, and (sysemu only) BSP/APIC state. */
4292 static void x86_cpu_reset(CPUState *s)
4294 X86CPU *cpu = X86_CPU(s);
4295 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
4296 CPUX86State *env = &cpu->env;
4301 xcc->parent_reset(s);
/* Zero everything up to end_reset_fields; fields after it survive reset. */
4303 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
4305 env->old_exception = -1;
4307 /* init to reset state */
4309 env->hflags2 |= HF2_GIF_MASK;
/* CR0 reset value: ET set, CD/NW set, paging/protection off. */
4311 cpu_x86_update_cr0(env, 0x60000010);
4312 env->a20_mask = ~0x0;
4313 env->smbase = 0x30000;
4314 env->msr_smi_count = 0;
4316 env->idt.limit = 0xffff;
4317 env->gdt.limit = 0xffff;
4318 env->ldt.limit = 0xffff;
4319 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
4320 env->tr.limit = 0xffff;
4321 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
/* Real-mode reset vector: CS base 0xffff0000, so execution starts at
 * 0xfffffff0 with EIP=0xfff0. */
4323 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
4324 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
4325 DESC_R_MASK | DESC_A_MASK);
4326 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
4327 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4329 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
4330 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4332 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
4333 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4335 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
4336 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4338 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
4339 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
/* EDX holds the CPU signature after reset, per the SDM. */
4343 env->regs[R_EDX] = env->cpuid_version;
4348 for (i = 0; i < 8; i++) {
4351 cpu_set_fpuc(env, 0x37f);
4353 env->mxcsr = 0x1f80;
4354 /* All units are in INIT state. */
4357 env->pat = 0x0007040600070406ULL;
4358 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
4360 memset(env->dr, 0, sizeof(env->dr));
4361 env->dr[6] = DR6_FIXED_1;
4362 env->dr[7] = DR7_FIXED_1;
4363 cpu_breakpoint_remove_all(s, BP_CPU);
4364 cpu_watchpoint_remove_all(s, BP_CPU);
4367 xcr0 = XSTATE_FP_MASK;
4369 #ifdef CONFIG_USER_ONLY
4370 /* Enable all the features for user-mode. */
4371 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4372 xcr0 |= XSTATE_SSE_MASK;
4374 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4375 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4376 if (env->features[esa->feature] & esa->bits) {
4381 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
4382 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
4384 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
4385 cr4 |= CR4_FSGSBASE_MASK;
4390 cpu_x86_update_cr4(env, cr4);
4393 * SDM 11.11.5 requires:
4394 * - IA32_MTRR_DEF_TYPE MSR.E = 0
4395 * - IA32_MTRR_PHYSMASKn.V = 0
4396 * All other bits are undefined. For simplification, zero it all.
4398 env->mtrr_deftype = 0;
4399 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
4400 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
4402 env->interrupt_injected = -1;
4403 env->exception_injected = -1;
4404 env->nmi_injected = false;
4405 #if !defined(CONFIG_USER_ONLY)
4406 /* We hard-wire the BSP to the first CPU. */
4407 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
/* APs start halted, waiting for INIT/SIPI. */
4409 s->halted = !cpu_is_bsp(cpu);
4411 if (kvm_enabled()) {
4412 kvm_arch_reset_vcpu(cpu);
4414 else if (hvf_enabled()) {
4420 #ifndef CONFIG_USER_ONLY
/* True when this CPU is the bootstrap processor, as reported by the
 * BSP bit in its APIC base MSR. */
4421 bool cpu_is_bsp(X86CPU *cpu)
4423 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4426 /* TODO: remove me, when reset over QOM tree is implemented */
/* Machine-reset hook: simply reset the wrapped CPU. */
4427 static void x86_cpu_machine_reset_cb(void *opaque)
4429 X86CPU *cpu = opaque;
4430 cpu_reset(CPU(cpu));
/* Initialize machine-check state: only for family >= 6 CPUs that advertise
 * both MCE and MCA; enables LMCE capability when configured. */
4434 static void mce_init(X86CPU *cpu)
4436 CPUX86State *cenv = &cpu->env;
4439 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4440 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4441 (CPUID_MCE | CPUID_MCA)) {
4442 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4443 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4444 cenv->mcg_ctl = ~(uint64_t)0;
/* Enable all error-reporting banks (MCi_CTL = all ones). */
4445 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4446 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4451 #ifndef CONFIG_USER_ONLY
/* Pick the APIC implementation matching the accelerator: in-kernel KVM
 * APIC, Xen APIC, or the default userspace "apic". */
4452 APICCommonClass *apic_get_class(void)
4454 const char *apic_type = "apic";
4456 /* TODO: in-kernel irqchip for hvf */
4457 if (kvm_apic_in_kernel()) {
4458 apic_type = "kvm-apic";
4459 } else if (xen_enabled()) {
4460 apic_type = "xen-apic";
4463 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
/* Create (but do not realize) the per-CPU local APIC device and attach it
 * as the "lapic" child of the CPU object. */
4466 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
4468 APICCommonState *apic;
4469 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
4471 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
4473 object_property_add_child(OBJECT(cpu), "lapic",
4474 OBJECT(cpu->apic_state), &error_abort);
/* Drop our reference; the child property now owns the APIC object. */
4475 object_unref(OBJECT(cpu->apic_state));
4477 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
4478 /* TODO: convert to link<> */
4479 apic = APIC_COMMON(cpu->apic_state);
4481 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
/* Realize the APIC created by x86_cpu_apic_create() and map the APIC MMIO
 * region exactly once (it is shared by all CPUs). */
4484 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4486 APICCommonState *apic;
4487 static bool apic_mmio_map_once;
/* No APIC configured (e.g. "-cpu ...,-apic"): nothing to do. */
4489 if (cpu->apic_state == NULL) {
4492 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
4495 /* Map APIC MMIO area */
4496 apic = APIC_COMMON(cpu->apic_state);
4497 if (!apic_mmio_map_once) {
4498 memory_region_add_subregion_overlap(get_system_memory(),
4500 MSR_IA32_APICBASE_BASE,
4503 apic_mmio_map_once = true;
/* machine-done notifier: if the machine exposes /machine/smram, alias it
 * into this CPU's address space so SMM code can access it. */
4507 static void x86_cpu_machine_done(Notifier *n, void *unused)
4509 X86CPU *cpu = container_of(n, X86CPU, machine_done);
4510 MemoryRegion *smram =
4511 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4514 cpu->smram = g_new(MemoryRegion, 1);
4515 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4516 smram, 0, 1ull << 32);
4517 memory_region_set_enabled(cpu->smram, true);
/* Priority 1 so the SMRAM alias shadows the normal memory map. */
4518 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
/* CONFIG_USER_ONLY stub: user-mode emulation has no APIC device
 * (empty body not visible in this chunk). */
4522 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4527 /* Note: Only safe for use on x86(-64) hosts */
/* Query the host's physical address width via CPUID 0x80000008;
 * fall back to 36 bits when the leaf is absent. */
4528 static uint32_t x86_host_phys_bits(void)
4531 uint32_t host_phys_bits;
4533 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4534 if (eax >= 0x80000008) {
4535 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4536 /* Note: According to AMD doc 25481 rev 2.34 they have a field
4537 * at 23:16 that can specify a maximum physical address bits for
4538 * the guest that can override this value; but I've not seen
4539 * anything with that set.
4541 host_phys_bits = eax & 0xff;
4543 /* It's an odd 64 bit machine that doesn't have the leaf for
4544 * physical address bits; fall back to 36 that's most older
4547 host_phys_bits = 36;
4550 return host_phys_bits;
/* NOTE(review): body not visible in this chunk; presumably raises *min to
 * @value when *min is lower — confirm against the full source. */
4553 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4560 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
/* If feature word @w has any bit set, make sure the CPUID level range that
 * contains its leaf (basic, extended, or Centaur) reaches that leaf. */
4561 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4563 CPUX86State *env = &cpu->env;
4564 FeatureWordInfo *fi = &feature_word_info[w];
4565 uint32_t eax = fi->cpuid_eax;
/* Top nibble selects the leaf range: 0x0, 0x8000, or 0xC000 series. */
4566 uint32_t region = eax & 0xF0000000;
4568 if (!env->features[w]) {
4574 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4577 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4580 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4585 /* Calculate XSAVE components based on the configured CPU feature flags */
/* Builds the 64-bit component mask from x86_ext_save_areas and splits it
 * across the FEAT_XSAVE_COMP_LO/HI feature words.  No-op (mask stays as
 * initialized) when XSAVE itself is not enabled. */
4586 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4588 CPUX86State *env = &cpu->env;
4592 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4597 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4598 const ExtSaveArea *esa = &x86_ext_save_areas[i];
/* Component i is enabled when its controlling feature bit is set. */
4599 if (env->features[esa->feature] & esa->bits) {
4600 mask |= (1ULL << i);
4604 env->features[FEAT_XSAVE_COMP_LO] = mask;
4605 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4608 /***** Steps involved on loading and filtering CPUID data
4610 * When initializing and realizing a CPU object, the steps
4611 * involved in setting up CPUID data are:
4613 * 1) Loading CPU model definition (X86CPUDefinition). This is
4614 * implemented by x86_cpu_load_def() and should be completely
4615 * transparent, as it is done automatically by instance_init.
4616 * No code should need to look at X86CPUDefinition structs
4617 * outside instance_init.
4619 * 2) CPU expansion. This is done by realize before CPUID
4620 * filtering, and will make sure host/accelerator data is
4621 * loaded for CPU models that depend on host capabilities
4622 * (e.g. "host"). Done by x86_cpu_expand_features().
4624 * 3) CPUID filtering. This initializes extra data related to
4625 * CPUID, and checks if the host supports all capabilities
4626 * required by the CPU. Runnability of a CPU model is
4627 * determined at this step. Done by x86_cpu_filter_features().
4629 * Some operations don't require all steps to be performed.
4632 * - CPU instance creation (instance_init) will run only CPU
4633 * model loading. CPU expansion can't run at instance_init-time
4634 * because host/accelerator data may be not available yet.
4635 * - CPU realization will perform both CPU model expansion and CPUID
4636 * filtering, and return an error in case one of them fails.
4637 * - query-cpu-definitions needs to run all 3 steps. It needs
4638 * to run CPUID filtering, as the 'unavailable-features'
4639 * field is set based on the filtering results.
4640 * - The query-cpu-model-expansion QMP command only needs to run
4641 * CPU model loading and CPU expansion. It should not filter
4642 * any CPUID data based on host capabilities.
4645 /* Expand CPU configuration data, based on configured features
4646  * and host/accelerator capabilities when appropriate.
4648 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4650     CPUX86State *env = &cpu->env;
4653     Error *local_err = NULL;
4655     /*TODO: Now cpu->max_features doesn't overwrite features
4656      * set using QOM properties, and we can convert
4657      * plus_features & minus_features to global properties
4658      * inside x86_cpu_parse_featurestr() too.
/* max_features ("-cpu max"/"host"): enable everything the accelerator
 * supports, except bits the user set explicitly and bits flagged as
 * no_autoenable. */
4660     if (cpu->max_features) {
4661         for (w = 0; w < FEATURE_WORDS; w++) {
4662             /* Override only features that weren't set explicitly
4666                 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4667                 ~env->user_features[w] & \
4668                 ~feature_word_info[w].no_autoenable_flags;
/* Apply legacy "+feature"/"-feature" lists as boolean QOM property writes. */
4672     for (l = plus_features; l; l = l->next) {
4673         const char *prop = l->data;
4674         object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4680     for (l = minus_features; l; l = l->next) {
4681         const char *prop = l->data;
4682         object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
/* KVM paravirt features are only meaningful under KVM and when exposed. */
4688     if (!kvm_enabled() || !cpu->expose_kvm) {
4689         env->features[FEAT_KVM] = 0;
4692     x86_cpu_enable_xsave_components(cpu);
4694     /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
4695     x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
/* With full_cpuid_auto_level, every enabled feature word pulls the
 * corresponding CPUID (x)level up far enough to contain its leaf. */
4696     if (cpu->full_cpuid_auto_level) {
4697         x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4698         x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4699         x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4700         x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4701         x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4702         x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4703         x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4704         x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4705         x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4706         x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4707         x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4708         /* SVM requires CPUID[0x8000000A] */
4709         if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4710             x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4713     /* SEV requires CPUID[0x8000001F] */
4714     if (sev_enabled()) {
4715         x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4719     /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
/* UINT32_MAX is the property default, meaning "not set by the user". */
4720     if (env->cpuid_level == UINT32_MAX) {
4721         env->cpuid_level = env->cpuid_min_level;
4723     if (env->cpuid_xlevel == UINT32_MAX) {
4724         env->cpuid_xlevel = env->cpuid_min_xlevel;
4726     if (env->cpuid_xlevel2 == UINT32_MAX) {
4727         env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4731     if (local_err != NULL) {
4732         error_propagate(errp, local_err);
4737  * Finishes initialization of CPUID data, filters CPU feature
4738  * words based on host availability of each feature.
4740  * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4742 static int x86_cpu_filter_features(X86CPU *cpu)
4744     CPUX86State *env = &cpu->env;
/* Mask every feature word down to what the accelerator supports and
 * remember the removed bits in cpu->filtered_features for reporting. */
4748     for (w = 0; w < FEATURE_WORDS; w++) {
4749         uint32_t host_feat =
4750             x86_cpu_get_supported_feature_word(w, false);
4751         uint32_t requested_features = env->features[w];
4752         env->features[w] &= host_feat;
4753         cpu->filtered_features[w] = requested_features & ~env->features[w];
4754         if (cpu->filtered_features[w]) {
/* Intel PT is all-or-nothing: the host's KVM-reported CPUID[0x14]
 * capabilities must be a superset of what we would advertise, otherwise
 * the whole feature has to be filtered out. */
4759     if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
4761         KVMState *s = CPU(cpu)->kvm_state;
4762         uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
4763         uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
4764         uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
4765         uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
4766         uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
4769             ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
4770             ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
4771             ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
4772             ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
4773             INTEL_PT_ADDR_RANGES_NUM) ||
4774             ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
4775             (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
4776             (ecx_0 & INTEL_PT_IP_LIP)) {
4778              * Processor Trace capabilities aren't configurable, so if the
4779              * host can't emulate the capabilities we report on
4780              * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
4782             env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
4783             cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
/* Vendor checks: all three CPUID[0] vendor-string words must match. */
4791 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4792 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4793 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4794 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4795 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4796 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for x86 CPUs: expand and filter CPUID data, settle
 * phys-bits and cache info, then create/realize the vCPU and its APIC. */
4797 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4799     CPUState *cs = CPU(dev);
4800     X86CPU *cpu = X86_CPU(dev);
4801     X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4802     CPUX86State *env = &cpu->env;
4803     Error *local_err = NULL;
/* static: the AMD hyperthreading warning below is printed only once. */
4804     static bool ht_warned;
/* Models like "host" only make sense when the accelerator uses host CPUID. */
4806     if (xcc->host_cpuid_required) {
4807         if (!accel_uses_host_cpuid()) {
4808             char *name = x86_cpu_class_get_model_name(xcc);
4809             error_setg(&local_err, "CPU model '%s' requires KVM", name);
/* cpu-pm: pass through the host's MWAIT leaf so the guest can use it. */
4814     if (enable_cpu_pm) {
4815         host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
4816                    &cpu->mwait.ecx, &cpu->mwait.edx);
4817         env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
4821     /* mwait extended info: needed for Core compatibility */
4822     /* We always wake on interrupt even if host does not have the capability */
4823     cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
4825     if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4826         error_setg(errp, "apic-id property was not initialized properly");
/* Step 2 of CPUID setup: expand configured features (see comment above). */
4830     x86_cpu_expand_features(cpu, &local_err);
/* Step 3: filter against host capability; fail only with enforce_cpuid. */
4835     if (x86_cpu_filter_features(cpu) &&
4836         (cpu->check_cpuid || cpu->enforce_cpuid)) {
4837         x86_cpu_report_filtered_features(cpu);
4838         if (cpu->enforce_cpuid) {
4839             error_setg(&local_err,
4840                        accel_uses_host_cpuid() ?
4841                        "Host doesn't support requested features" :
4842                        "TCG doesn't support requested features");
4847     /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
/* Mirror CPUID[1].EDX alias bits into CPUID[8000_0001].EDX as AMD requires. */
4850     if (IS_AMD_CPU(env)) {
4851         env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4852         env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4853            & CPUID_EXT2_AMD_ALIASES);
4856     /* For 64bit systems think about the number of physical bits to present.
4857      * ideally this should be the same as the host; anything other than matching
4858      * the host can cause incorrect guest behaviour.
4859      * QEMU used to pick the magic value of 40 bits that corresponds to
4860      * consumer AMD devices but nothing else.
4862     if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4863         if (accel_uses_host_cpuid()) {
4864             uint32_t host_phys_bits = x86_host_phys_bits();
4867             if (cpu->host_phys_bits) {
4868                 /* The user asked for us to use the host physical bits */
4869                 cpu->phys_bits = host_phys_bits;
4872             /* Print a warning if the user set it to a value that's not the
4875             if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4877                 warn_report("Host physical bits (%u)"
4878                             " does not match phys-bits property (%u)",
4879                             host_phys_bits, cpu->phys_bits);
4883             if (cpu->phys_bits &&
4884                 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4885                 cpu->phys_bits < 32)) {
4886                 error_setg(errp, "phys-bits should be between 32 and %u "
4888                                  TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
/* Under TCG the MMU model hard-codes the physical address width. */
4892             if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4893                 error_setg(errp, "TCG only supports phys-bits=%u",
4894                                   TCG_PHYS_ADDR_BITS);
4898         /* 0 means it was not explicitly set by the user (or by machine
4899          * compat_props or by the host code above). In this case, the default
4900          * is the value used by TCG (40).
4902         if (cpu->phys_bits == 0) {
4903             cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4906         /* For 32 bit systems don't use the user set value, but keep
4907          * phys_bits consistent with what we tell the guest.
4909         if (cpu->phys_bits != 0) {
4910             error_setg(errp, "phys-bits is not user-configurable in 32 bit");
/* Without long mode: PSE36 implies 36 physical bits, otherwise 32. */
4914         if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4915             cpu->phys_bits = 36;
4917             cpu->phys_bits = 32;
4921     /* Cache information initialization */
/* legacy-cache=off requires the model to carry real cache topology data. */
4922     if (!cpu->legacy_cache) {
4923         if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
4924             char *name = x86_cpu_class_get_model_name(xcc);
4926                       "CPU model '%s' doesn't support legacy-cache=off", name);
4930         env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
4931             *xcc->cpu_def->cache_info;
4933         /* Build legacy cache information */
4934         env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
4935         env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
4936         env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
4937         env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
4939         env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
4940         env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
4941         env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
4942         env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
4944         env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
4945         env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
4946         env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
4947         env->cache_info_amd.l3_cache = &legacy_l3_cache;
4951     cpu_exec_realizefn(cs, &local_err);
4952     if (local_err != NULL) {
4953         error_propagate(errp, local_err);
4957 #ifndef CONFIG_USER_ONLY
4958     qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
/* An APIC is needed when the CPU advertises one or in any SMP setup. */
4960     if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4961         x86_cpu_apic_create(cpu, &local_err);
4962         if (local_err != NULL) {
4970 #ifndef CONFIG_USER_ONLY
/* TCG needs a second address space for SMM: an alias of system memory
 * inside a root container where SMRAM can later be overlaid. */
4971     if (tcg_enabled()) {
4972         cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4973         cpu->cpu_as_root = g_new(MemoryRegion, 1);
4975         /* Outer container... */
4976         memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4977         memory_region_set_enabled(cpu->cpu_as_root, true);
4979         /* ... with two regions inside: normal system memory with low
4982         memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4983                                  get_system_memory(), 0, ~0ull);
4984         memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4985         memory_region_set_enabled(cpu->cpu_as_mem, true);
4988         cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4989         cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4991         /* ... SMRAM with higher priority, linked from /machine/smram. */
4992         cpu->machine_done.notify = x86_cpu_machine_done;
4993         qemu_add_machine_init_done_notifier(&cpu->machine_done);
5000      * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
5001      * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
5002      * based on inputs (sockets,cores,threads), it is still better to give
5005      * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
5006      * cs->nr_threads hasn't been populated yet and the checking is incorrect.
5008     if (IS_AMD_CPU(env) &&
5009         !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
5010         cs->nr_threads > 1 && !ht_warned) {
5011             error_report("This family of AMD CPU doesn't support "
5012                          "hyperthreading(%d). Please configure -smp "
5013                          "options properly or try enabling topoext feature.",
5018     x86_cpu_apic_realize(cpu, &local_err);
5019     if (local_err != NULL) {
/* Finally chain to the parent class realize (DeviceClass). */
5024     xcc->parent_realize(dev, &local_err);
5027     if (local_err != NULL) {
5028         error_propagate(errp, local_err);
/* DeviceClass::unrealize: undo x86_cpu_realizefn — remove the vCPU, drop the
 * reset handler, destroy the APIC, then chain to the parent unrealize. */
5033 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
5035     X86CPU *cpu = X86_CPU(dev);
5036     X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5037     Error *local_err = NULL;
5039 #ifndef CONFIG_USER_ONLY
5040     cpu_remove_sync(CPU(dev));
5041     qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
5044     if (cpu->apic_state) {
5045         object_unparent(OBJECT(cpu->apic_state));
/* Clear the pointer so a later unrealize/realize cycle starts clean. */
5046         cpu->apic_state = NULL;
5049     xcc->parent_unrealize(dev, &local_err);
5050     if (local_err != NULL) {
5051         error_propagate(errp, local_err);
/* Backing state for one boolean feature-flag QOM property: identifies the
 * feature word (w) and bit mask it controls — see x86_cpu_get/set_bit_prop. */
5056 typedef struct BitProperty {
/* QOM getter for a feature-bit property: reports true only when ALL bits of
 * the property's mask are set in the feature word. */
5061 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5062                                  void *opaque, Error **errp)
5064     X86CPU *cpu = X86_CPU(obj);
5065     BitProperty *fp = opaque;
5066     uint32_t f = cpu->env.features[fp->w];
5067     bool value = (f & fp->mask) == fp->mask;
5068     visit_type_bool(v, name, &value, errp);
/* QOM setter for a feature-bit property: sets or clears the property's mask
 * in the feature word, and records the bits as user-set so automatic feature
 * expansion will not override them. */
5071 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5072                                  void *opaque, Error **errp)
5074     DeviceState *dev = DEVICE(obj);
5075     X86CPU *cpu = X86_CPU(obj);
5076     BitProperty *fp = opaque;
5077     Error *local_err = NULL;
/* Feature flags are CPUID configuration; they may not change after realize. */
5080     if (dev->realized) {
5081         qdev_prop_set_after_realize(dev, name, errp);
5085     visit_type_bool(v, name, &value, &local_err);
5087         error_propagate(errp, local_err);
5092         cpu->env.features[fp->w] |= fp->mask;
5094         cpu->env.features[fp->w] &= ~fp->mask;
/* Mark as explicitly user-configured (see x86_cpu_expand_features). */
5096     cpu->env.user_features[fp->w] |= fp->mask;
/* Property release hook; presumably frees the BitProperty allocated in
 * x86_cpu_register_bit_prop — TODO confirm (free call not visible here). */
5099 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5102     BitProperty *prop = opaque;
5106 /* Register a boolean property to get/set a single bit in a uint32_t field.
5108  * The same property name can be registered multiple times to make it affect
5109  * multiple bits in the same FeatureWord. In that case, the getter will return
5110  * true only if all bits are set.
5112 static void x86_cpu_register_bit_prop(X86CPU *cpu,
5113                                       const char *prop_name,
5119     uint32_t mask = (1UL << bitnr);
/* Look up an existing property of the same name so its mask can be widened
 * instead of registering a duplicate. */
5121     op = object_property_find(OBJECT(cpu), prop_name, NULL);
/* First registration: allocate the BitProperty (released by
 * x86_cpu_release_bit_prop) and add the QOM property. */
5127         fp = g_new0(BitProperty, 1);
5130         object_property_add(OBJECT(cpu), prop_name, "bool",
5131                             x86_cpu_get_bit_prop,
5132                             x86_cpu_set_bit_prop,
5133                             x86_cpu_release_bit_prop, fp, &error_abort);
/* Register the QOM property for one named feature bit of feature word 'w';
 * bits without a name in feat_names are skipped. */
5137 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5141     FeatureWordInfo *fi = &feature_word_info[w];
5142     const char *name = fi->feat_names[bitnr];
5148     /* Property names should use "-" instead of "_".
5149      * Old names containing underscores are registered as aliases
5150      * using object_property_add_alias()
5152     assert(!strchr(name, '_'));
5153     /* aliases don't use "|" delimiters anymore, they are registered
5154      * manually using object_property_add_alias() */
5155     assert(!strchr(name, '|'));
5156     x86_cpu_register_bit_prop(cpu, name, w, bitnr);
/* Build guest-crash information from the Hyper-V crash MSRs, if the
 * hv-crash feature is enabled; returns NULL otherwise (caller frees). */
5159 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5161     X86CPU *cpu = X86_CPU(cs);
5162     CPUX86State *env = &cpu->env;
5163     GuestPanicInformation *panic_info = NULL;
5165     if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5166         panic_info = g_malloc0(sizeof(GuestPanicInformation));
5168         panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
/* The five Hyper-V crash parameter MSRs map 1:1 onto arg1..arg5. */
5170         assert(HV_CRASH_PARAMS >= 5);
5171         panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5172         panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5173         panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5174         panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5175         panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5180 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5181 const char *name, void *opaque,
5184 CPUState *cs = CPU(obj);
5185 GuestPanicInformation *panic_info;
5187 if (!cs->crash_occurred) {
5188 error_setg(errp, "No crash occured");
5192 panic_info = x86_cpu_get_crash_info(cs);
5193 if (panic_info == NULL) {
5194 error_setg(errp, "No crash information");
5198 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5200 qapi_free_GuestPanicInformation(panic_info);
/* TypeInfo::instance_init: register per-instance QOM properties (versioning,
 * vendor, feature words, per-bit feature flags and their legacy aliases),
 * then load the class's CPU model definition. */
5203 static void x86_cpu_initfn(Object *obj)
5205     CPUState *cs = CPU(obj);
5206     X86CPU *cpu = X86_CPU(obj);
5207     X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
5208     CPUX86State *env = &cpu->env;
/* CPUID version fields (family/model/stepping) as integer properties. */
5213     object_property_add(obj, "family", "int",
5214                         x86_cpuid_version_get_family,
5215                         x86_cpuid_version_set_family, NULL, NULL, NULL);
5216     object_property_add(obj, "model", "int",
5217                         x86_cpuid_version_get_model,
5218                         x86_cpuid_version_set_model, NULL, NULL, NULL);
5219     object_property_add(obj, "stepping", "int",
5220                         x86_cpuid_version_get_stepping,
5221                         x86_cpuid_version_set_stepping, NULL, NULL, NULL);
5222     object_property_add_str(obj, "vendor",
5223                             x86_cpuid_get_vendor,
5224                             x86_cpuid_set_vendor, NULL);
5225     object_property_add_str(obj, "model-id",
5226                             x86_cpuid_get_model_id,
5227                             x86_cpuid_set_model_id, NULL);
5228     object_property_add(obj, "tsc-frequency", "int",
5229                         x86_cpuid_get_tsc_freq,
5230                         x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
/* Read-only views over the enabled and host-filtered feature words. */
5231     object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
5232                         x86_cpu_get_feature_words,
5233                         NULL, NULL, (void *)env->features, NULL);
5234     object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
5235                         x86_cpu_get_feature_words,
5236                         NULL, NULL, (void *)cpu->filtered_features, NULL);
5238     object_property_add(obj, "crash-information", "GuestPanicInformation",
5239                         x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
5241     cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
/* One boolean property per named CPUID feature bit. */
5243     for (w = 0; w < FEATURE_WORDS; w++) {
5246         for (bitnr = 0; bitnr < 32; bitnr++) {
5247             x86_cpu_register_feature_bit_props(cpu, w, bitnr);
/* Back-compat aliases: alternate spellings and old underscore names. */
5251     object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
5252     object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
5253     object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
5254     object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
5255     object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
5256     object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
5257     object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
5259     object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
5260     object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
5261     object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
5262     object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
5263     object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
5264     object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
5265     object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
5266     object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
5267     object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
5268     object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
5269     object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
5270     object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
5271     object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
5272     object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
5273     object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
5274     object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
5275     object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
5276     object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
5277     object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
5278     object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
5279     object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
/* Step 1 of CPUID setup: load the class's model definition (see comment
 * block "Steps involved ..." earlier in this file). */
5282     x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
5286 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5288 X86CPU *cpu = X86_CPU(cs);
5290 return cpu->apic_id;
5293 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5295 X86CPU *cpu = X86_CPU(cs);
5297 return cpu->env.cr[0] & CR0_PG_MASK;
5300 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5302 X86CPU *cpu = X86_CPU(cs);
5304 cpu->env.eip = value;
5307 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5309 X86CPU *cpu = X86_CPU(cs);
5311 cpu->env.eip = tb->pc - tb->cs_base;
/* CPUClass::has_work: true when the vCPU has a deliverable event:
 *  - HARD/POLL interrupts, but only when EFLAGS.IF allows them;
 *  - NMI/INIT/SIPI/MCE, which are never masked by IF;
 *  - SMI, unless the CPU is already in SMM. */
5314 static bool x86_cpu_has_work(CPUState *cs)
5316     X86CPU *cpu = X86_CPU(cs);
5317     CPUX86State *env = &cpu->env;
5319     return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
5320                                       CPU_INTERRUPT_POLL)) &&
5321             (env->eflags & IF_MASK)) ||
5322            (cs->interrupt_request & (CPU_INTERRUPT_NMI |
5323                                      CPU_INTERRUPT_INIT |
5324                                      CPU_INTERRUPT_SIPI |
5325                                      CPU_INTERRUPT_MCE)) ||
5326            ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
5327             !(env->hflags & HF_SMM_MASK));
/* CPUClass::disas_set_info: pick the disassembler machine/mode from the
 * current code segment (64-bit, 32-bit, or 16-bit real/vm86). */
5330 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5332     X86CPU *cpu = X86_CPU(cs);
5333     CPUX86State *env = &cpu->env;
5335     info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5336                   : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5337                   : bfd_mach_i386_i8086);
5338     info->print_insn = print_insn_i386;
/* Capstone settings mirror the bfd selection above. */
5340     info->cap_arch = CS_ARCH_X86;
5341     info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5342                       : env->hflags & HF_CS32_MASK ? CS_MODE_32
/* x86 has byte-granular, variable-length instructions. */
5344     info->cap_insn_unit = 1;
5345     info->cap_insn_split = 8;
/* Recompute env->hflags from the architectural state (CR0/CR4, EFER,
 * EFLAGS and segment descriptors). Only the bits in ~HFLAG_COPY_MASK are
 * derived here; the rest are carried over unchanged. */
5348 void x86_update_hflags(CPUX86State *env)
5351 #define HFLAG_COPY_MASK \
5352     ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
5353        HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
5354        HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
5355        HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
5357     hflags = env->hflags & HFLAG_COPY_MASK;
/* CPL is taken from the SS descriptor's DPL field. */
5358     hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
5359     hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
/* CR0.MP/EM/TS shift into the corresponding hflag positions together. */
5360     hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
5361                 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
5362     hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
5364     if (env->cr[4] & CR4_OSFXSR_MASK) {
5365         hflags |= HF_OSFXSR_MASK;
5368     if (env->efer & MSR_EFER_LMA) {
5369         hflags |= HF_LMA_MASK;
/* In long mode with CS.L set we are executing 64-bit code. */
5372     if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
5373         hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
/* Otherwise CS/SS default operand size comes from the descriptor B bits. */
5375         hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
5376                     (DESC_B_SHIFT - HF_CS32_SHIFT);
5377         hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
5378                     (DESC_B_SHIFT - HF_SS32_SHIFT);
/* ADDSEG: segment bases must be added in real mode, vm86 mode, 16-bit
 * code, or whenever DS/ES/SS have a non-zero base. */
5379         if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
5380             !(hflags & HF_CS32_MASK)) {
5381             hflags |= HF_ADDSEG_MASK;
5383             hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
5384                         env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
5387     env->hflags = hflags;
/* qdev properties of TYPE_X86_CPU (wired up via dc->props in
 * x86_cpu_common_class_init). */
5390 static Property x86_cpu_properties[] = {
5391 #ifdef CONFIG_USER_ONLY
5392     /* apic_id = 0 by default for *-user, see commit 9886e834 */
5393     DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
5394     DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
5395     DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
5396     DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
/* System emulation: topology IDs start unset and must be assigned. */
5398     DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
5399     DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
5400     DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
5401     DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
5403     DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
5404     DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
/* Hyper-V enlightenments, all opt-in. */
5405     { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
5406     DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
5407     DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
5408     DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
5409     DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
5410     DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
5411     DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
5412     DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
5413     DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
5414     DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
5415     DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
5416     DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
5417     DEFINE_PROP_BOOL("hv-tlbflush", X86CPU, hyperv_tlbflush, false),
/* check warns about filtered features; enforce turns them into an error. */
5418     DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
5419     DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
5420     DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
5421     DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
5422     DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
5423     DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
/* UINT32_MAX = "not set"; resolved against min levels at realize time. */
5424     DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
5425     DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
5426     DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
5427     DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
5428     DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
5429     DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
5430     DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
5431     DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
5432     DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
5433     DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
5434     DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
5435     DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
5437     DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
5438     DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
5439     DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
5442      * legacy_cache defaults to true unless the CPU model provides its
5443      * own cache information (see x86_cpu_load_def()).
5445     DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
5448      * From "Requirements for Implementing the Microsoft
5449      * Hypervisor Interface":
5450      * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
5452      * "Starting with Windows Server 2012 and Windows 8, if
5453      * CPUID.40000005.EAX contains a value of -1, Windows assumes that
5454      * the hypervisor imposes no specific limit to the number of VPs.
5455      * In this case, Windows Server 2012 guest VMs may use more than
5456      * 64 VPs, up to the maximum supported number of processors applicable
5457      * to the specific Windows version being used."
5459     DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
5460     DEFINE_PROP_END_OF_LIST()
/* Class init for TYPE_X86_CPU: install realize/unrealize/reset hooks,
 * the qdev property list, and all CPUClass callbacks. */
5463 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
5465     X86CPUClass *xcc = X86_CPU_CLASS(oc);
5466     CPUClass *cc = CPU_CLASS(oc);
5467     DeviceClass *dc = DEVICE_CLASS(oc);
5469     device_class_set_parent_realize(dc, x86_cpu_realizefn,
5470                                     &xcc->parent_realize);
5471     device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
5472                                       &xcc->parent_unrealize);
5473     dc->props = x86_cpu_properties;
5475     xcc->parent_reset = cc->reset;
5476     cc->reset = x86_cpu_reset;
5477     cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
5479     cc->class_by_name = x86_cpu_class_by_name;
5480     cc->parse_features = x86_cpu_parse_featurestr;
5481     cc->has_work = x86_cpu_has_work;
5483     cc->do_interrupt = x86_cpu_do_interrupt;
5484     cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
5486     cc->dump_state = x86_cpu_dump_state;
5487     cc->get_crash_info = x86_cpu_get_crash_info;
5488     cc->set_pc = x86_cpu_set_pc;
5489     cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
5490     cc->gdb_read_register = x86_cpu_gdb_read_register;
5491     cc->gdb_write_register = x86_cpu_gdb_write_register;
5492     cc->get_arch_id = x86_cpu_get_arch_id;
5493     cc->get_paging_enabled = x86_cpu_get_paging_enabled;
/* *-user handles faults directly; system emulation needs full MMU and
 * memory-mapping/ELF-note support instead. */
5494 #ifdef CONFIG_USER_ONLY
5495     cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
5497     cc->asidx_from_attrs = x86_asidx_from_attrs;
5498     cc->get_memory_mapping = x86_cpu_get_memory_mapping;
5499     cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
5500     cc->write_elf64_note = x86_cpu_write_elf64_note;
5501     cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
5502     cc->write_elf32_note = x86_cpu_write_elf32_note;
5503     cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
5504     cc->vmsd = &vmstate_x86_cpu;
5506     cc->gdb_arch_name = x86_gdb_arch_name;
/* GDB register description differs between 64- and 32-bit targets. */
5507 #ifdef TARGET_X86_64
5508     cc->gdb_core_xml_file = "i386-64bit.xml";
5509     cc->gdb_num_core_regs = 57;
5511     cc->gdb_core_xml_file = "i386-32bit.xml";
5512     cc->gdb_num_core_regs = 41;
5514 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
5515     cc->debug_excp_handler = breakpoint_handler;
5517     cc->cpu_exec_enter = x86_cpu_exec_enter;
5518     cc->cpu_exec_exit = x86_cpu_exec_exit;
5520     cc->tcg_initialize = tcg_x86_init;
5522     cc->disas_set_info = x86_disas_set_info;
5524     dc->user_creatable = true;
/* Abstract base type; concrete CPU model types are registered from
 * builtin_x86_defs in x86_cpu_register_types(). */
5527 static const TypeInfo x86_cpu_type_info = {
5528     .name = TYPE_X86_CPU,
5530     .instance_size = sizeof(X86CPU),
5531     .instance_init = x86_cpu_initfn,
5533     .class_size = sizeof(X86CPUClass),
5534     .class_init = x86_cpu_common_class_init,
5538 /* "base" CPU model, used by query-cpu-model-expansion */
/* Marked static and migration-safe: the empty feature set is stable. */
5539 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
5541     X86CPUClass *xcc = X86_CPU_CLASS(oc);
5543     xcc->static_model = true;
5544     xcc->migration_safe = true;
5545     xcc->model_description = "base CPU model type with no features enabled";
/* TypeInfo for the "base" model defined just above. */
5549 static const TypeInfo x86_base_cpu_type_info = {
5550         .name = X86_CPU_TYPE_NAME("base"),
5551         .parent = TYPE_X86_CPU,
5552         .class_init = x86_cpu_base_class_init,
/* QOM type registration: the abstract base type, every built-in CPU model,
 * "max", "base", and — when an accelerator can provide host CPUID — "host". */
5555 static void x86_cpu_register_types(void)
5559     type_register_static(&x86_cpu_type_info);
5560     for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5561         x86_register_cpudef_type(&builtin_x86_defs[i]);
5563     type_register_static(&max_x86_cpu_type_info);
5564     type_register_static(&x86_base_cpu_type_info);
5565 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5566     type_register_static(&host_x86_cpu_type_info);
5570 type_init(x86_cpu_register_types)