]> Git Repo - qemu.git/blame - target/i386/machine.c
Merge remote-tracking branch 'rth/tags/pull-s390-20170512' into staging
[qemu.git] / target / i386 / machine.c
CommitLineData
b6a0aa05 1#include "qemu/osdep.h"
33c11879
PB
2#include "qemu-common.h"
3#include "cpu.h"
63c91552 4#include "exec/exec-all.h"
8dd3dca3
AJ
5#include "hw/hw.h"
6#include "hw/boards.h"
0d09e41a
PB
7#include "hw/i386/pc.h"
8#include "hw/isa/isa.h"
1e00b8d5 9#include "migration/cpu.h"
8dd3dca3 10
9c17d615 11#include "sysemu/kvm.h"
8dd3dca3 12
36f96c4b
HZ
13#include "qemu/error-report.h"
14
/* Wire format of one segment register (also used for ldt/tr/gdt/idt). */
static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),   /* target_ulong: 32/64-bit */
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};

/* Embed a single SegmentCache member of _state into a VMState field list. */
#define VMSTATE_SEGMENT(_field, _state) {                            \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(SegmentCache),                              \
    .vmsd       = &vmstate_segment,                                  \
    .flags      = VMS_STRUCT,                                        \
    .offset     = offsetof(_state, _field)                           \
            + type_check(SegmentCache,typeof_field(_state, _field))  \
}

/* Fixed-size array of SegmentCache members (e.g. the six env.segs). */
#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                            \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)
8dd3dca3 39
/* Legacy SSE state: the low 128 bits (quadwords 0-1) of each ZMM register. */
static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

/* XMM halves of registers _start .. _start + CPU_NB_REGS - 1. */
#define VMSTATE_XMM_REGS(_field, _state, _start)                         \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_xmm_reg, ZMMReg)
fc3b0aa2 54
/* YMMH format is the same as XMM, but for bits 128-255 (quadwords 2-3). */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

/* _v is the minimum stream version that carries this sub-array. */
#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v,    \
                             vmstate_ymmh_reg, ZMMReg)
f1665b21 70
/* Upper 256 bits (quadwords 4-7) of a ZMM register, i.e. the AVX-512 part. */
static const VMStateDescription vmstate_zmmh_reg = {
    .name = "zmmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

/* ZMM upper halves of registers _start .. _start + CPU_NB_REGS - 1. */
#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start)                   \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_zmmh_reg, ZMMReg)

#ifdef TARGET_X86_64
/* Full 512 bits of one of the high ZMM registers (zmm16-zmm31),
   which exist only in 64-bit mode. */
static const VMStateDescription vmstate_hi16_zmm_reg = {
    .name = "hi16_zmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_hi16_zmm_reg, ZMMReg)
#endif

110
/* One MPX bound register: lower and upper bound. */
static const VMStateDescription vmstate_bnd_regs = {
    .name = "bnd_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(lb, BNDReg),
        VMSTATE_UINT64(ub, BNDReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_BND_REGS(_field, _state, _n)                             \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)
124
/* One variable-range MTRR: base/mask MSR pair. */
static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

/* _v is the minimum stream version that carries the array. */
#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                        \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)
216c07c3 138
/*
 * Scratch structure used to migrate one 80-bit x87 register as a
 * (mantissa, exponent) pair of fixed-width integers, independent of
 * the host's floatx80 representation.
 */
typedef struct x86_FPReg_tmp {
    FPReg *parent;      /* the CPU register being saved/loaded */
    uint64_t tmp_mant;  /* 64-bit mantissa */
    uint16_t tmp_exp;   /* sign + exponent */
} x86_FPReg_tmp;

static void fpreg_pre_save(void *opaque)
{
    x86_FPReg_tmp *tmp = opaque;

    /* we save the real CPU data (in case of MMX usage only 'mant'
       contains the MMX register) */
    cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d);
}

static int fpreg_post_load(void *opaque, int version)
{
    x86_FPReg_tmp *tmp = opaque;

    /* Rebuild the host floatx80 value from the migrated pair. */
    tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp);
    return 0;
}

/* Wire format of the temporary: mantissa then exponent. */
static const VMStateDescription vmstate_fpreg_tmp = {
    .name = "fpreg_tmp",
    .post_load = fpreg_post_load,
    .pre_save = fpreg_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp),
        VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

/* An FPReg is migrated through the temporary above (VMSTATE_WITH_TMP). */
static const VMStateDescription vmstate_fpreg = {
    .name = "fpreg",
    .fields = (VMStateField[]) {
        VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp),
        VMSTATE_END_OF_LIST()
    }
};
180
/*
 * Fold run-time CPU state into the migration-only *_vmstate fields
 * just before the cpu section is sent.
 */
static void cpu_pre_save(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    /* FPU */
    /* Merge the x87 top-of-stack pointer back into bits 11-13 of the
       status word, and pack the (inverted) per-register tags into one
       byte; cpu_post_load() undoes both transformations. */
    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    env->fptag_vmstate = 0;
    for(i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

    /* 0 is the only format still supported (see cpu_post_load). */
    env->fpregs_format_vmstate = 0;

    /*
     * Real mode guest segments register DPL should be zero.
     * Older KVM version were setting it wrongly.
     * Fixing it will allow live migration to host with unrestricted guest
     * support (otherwise the migration will fail with invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

}
214
/*
 * Validate and unpack the migrated cpu section, then resynchronise
 * derived state (hflags CPL, FP status, debug registers, TLB).
 * Returns 0 on success, -EINVAL on an unacceptable incoming state.
 */
static int cpu_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    /* Refuse streams whose TSC frequency contradicts an explicit
       user-specified frequency on the destination. */
    if (env->tsc_khz && env->user_tsc_khz &&
        env->tsc_khz != env->user_tsc_khz) {
        error_report("Mismatch between user-specified TSC frequency and "
                     "migrated TSC frequency");
        return -EINVAL;
    }

    /* Only format 0 (softfloat mant/exp pairs) is supported; see
       cpu_pre_save(). */
    if (env->fpregs_format_vmstate) {
        error_report("Unsupported old non-softfloat CPU state");
        return -EINVAL;
    }
    /*
     * Real mode guest segments register DPL should be zero.
     * Older KVM version were setting it wrongly.
     * Fixing it will allow live migration from such host that don't have
     * restricted guest support to a host with unrestricted guest support
     * (otherwise the migration will fail with invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
     * running under KVM.  This is wrong for conforming code segments.
     * Luckily, in our implementation the CPL field of hflags is redundant
     * and we can get the right value from the SS descriptor privilege level.
     */
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;

    /* Undo the FPU packing done in cpu_pre_save(). */
    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
    env->fptag_vmstate ^= 0xff;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
    update_fp_status(env);

    cpu_breakpoint_remove_all(cs, BP_CPU);
    cpu_watchpoint_remove_all(cs, BP_CPU);
    {
        /* Indicate all breakpoints disabled, as they are, then
           let the helper re-enable them. */
        target_ulong dr7 = env->dr[7];
        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
        cpu_x86_update_dr7(env, dr7);
    }
    /* Mappings cached before migration are no longer valid. */
    tlb_flush(cs);

    if (tcg_enabled()) {
        cpu_smm_update(cpu);
    }
    return 0;
}
283
f6584ee2
GN
284static bool async_pf_msr_needed(void *opaque)
285{
f56e3a14 286 X86CPU *cpu = opaque;
f6584ee2 287
f56e3a14 288 return cpu->env.async_pf_en_msr != 0;
f6584ee2
GN
289}
290
bc9a839d
MT
291static bool pv_eoi_msr_needed(void *opaque)
292{
f56e3a14 293 X86CPU *cpu = opaque;
bc9a839d 294
f56e3a14 295 return cpu->env.pv_eoi_en_msr != 0;
bc9a839d
MT
296}
297
/* Subsection gate: steal-time MSR only migrated when non-zero. */
static bool steal_time_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.steal_time_msr != 0;
}

static const VMStateDescription vmstate_steal_time_msr = {
    .name = "cpu/steal_time_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = steal_time_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
315
/* Optional subsection carrying the KVM async page fault enable MSR. */
static const VMStateDescription vmstate_async_pf_msr = {
    .name = "cpu/async_pf_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
326
/* Optional subsection carrying the paravirtual EOI enable MSR.
   NOTE: the wire name says "async_pv_eoi_msr"; it must stay that way
   for cross-version migration compatibility. */
static const VMStateDescription vmstate_pv_eoi_msr = {
    .name = "cpu/async_pv_eoi_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pv_eoi_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
337
/* Subsection gate: last FP opcode / instruction / data pointers,
   sent only when any of them is non-zero. */
static bool fpop_ip_dp_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
}

static const VMStateDescription vmstate_fpop_ip_dp = {
    .name = "cpu/fpop_ip_dp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpop_ip_dp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(env.fpop, X86CPU),
        VMSTATE_UINT64(env.fpip, X86CPU),
        VMSTATE_UINT64(env.fpdp, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
358
/* Subsection gate: IA32_TSC_ADJUST MSR, sent only when non-zero. */
static bool tsc_adjust_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_adjust != 0;
}

static const VMStateDescription vmstate_msr_tsc_adjust = {
    .name = "cpu/msr_tsc_adjust",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_adjust_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
377
/* Subsection gate: IA32_TSC_DEADLINE MSR, sent only when armed. */
static bool tscdeadline_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_deadline != 0;
}

static const VMStateDescription vmstate_msr_tscdeadline = {
    .name = "cpu/msr_tscdeadline",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tscdeadline_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
396
21e87c46
AK
397static bool misc_enable_needed(void *opaque)
398{
f56e3a14
AF
399 X86CPU *cpu = opaque;
400 CPUX86State *env = &cpu->env;
21e87c46
AK
401
402 return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
403}
404
0779caeb
ACL
405static bool feature_control_needed(void *opaque)
406{
407 X86CPU *cpu = opaque;
408 CPUX86State *env = &cpu->env;
409
410 return env->msr_ia32_feature_control != 0;
411}
412
/* Optional subsection carrying IA32_MISC_ENABLE. */
static const VMStateDescription vmstate_msr_ia32_misc_enable = {
    .name = "cpu/msr_ia32_misc_enable",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = misc_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
423
/* Optional subsection carrying IA32_FEATURE_CONTROL. */
static const VMStateDescription vmstate_msr_ia32_feature_control = {
    .name = "cpu/msr_ia32_feature_control",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = feature_control_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
434
/* Subsection gate: architectural PMU MSRs, sent when any control,
   status, fixed counter or general-purpose counter/evtsel is non-zero. */
static bool pmu_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
        env->msr_global_status || env->msr_global_ovf_ctrl) {
        return true;
    }
    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
        if (env->msr_fixed_counters[i]) {
            return true;
        }
    }
    for (i = 0; i < MAX_GP_COUNTERS; i++) {
        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_architectural_pmu = {
    .name = "cpu/msr_architectural_pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_status, X86CPU),
        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_END_OF_LIST()
    }
};
475
/* Subsection gate: MPX state, sent when any bound register, the bound
   config/status registers, or BNDCFGS is non-zero. */
static bool mpx_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < 4; i++) {
        if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
            return true;
        }
    }

    if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
        return true;
    }

    return !!env->msr_bndcfgs;
}

static const VMStateDescription vmstate_mpx = {
    .name = "cpu/mpx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mpx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
508
/* Subsection gate: Hyper-V hypercall page / guest OS id MSRs. */
static bool hyperv_hypercall_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
}

static const VMStateDescription vmstate_msr_hypercall_hypercall = {
    .name = "cpu/msr_hyperv_hypercall",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_hypercall_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
528
/* Subsection gate: Hyper-V virtual APIC assist page MSR. */
static bool hyperv_vapic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_vapic != 0;
}

static const VMStateDescription vmstate_msr_hyperv_vapic = {
    .name = "cpu/msr_hyperv_vapic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_vapic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
547
/* Subsection gate: Hyper-V reference TSC page MSR. */
static bool hyperv_time_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_tsc != 0;
}

static const VMStateDescription vmstate_msr_hyperv_time = {
    .name = "cpu/msr_hyperv_time",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_time_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
566
/* Subsection gate: Hyper-V crash parameter MSRs, sent when any is set. */
static bool hyperv_crash_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < HV_X64_MSR_CRASH_PARAMS; i++) {
        if (env->msr_hv_crash_params[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_crash = {
    .name = "cpu/msr_hyperv_crash",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_crash_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params,
                             X86CPU, HV_X64_MSR_CRASH_PARAMS),
        VMSTATE_END_OF_LIST()
    }
};
592
/* Subsection gate: Hyper-V VP runtime MSR; skipped entirely when the
   feature is disabled on the CPU, even if the MSR happens to be set. */
static bool hyperv_runtime_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    if (!cpu->hyperv_runtime) {
        return false;
    }

    return env->msr_hv_runtime != 0;
}

static const VMStateDescription vmstate_msr_hyperv_runtime = {
    .name = "cpu/msr_hyperv_runtime",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_runtime_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
615
/* Subsection gate: Hyper-V synthetic interrupt controller state, sent
   when the control, event/message pages or any SINT register is set. */
static bool hyperv_synic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_hv_synic_control != 0 ||
        env->msr_hv_synic_evt_page != 0 ||
        env->msr_hv_synic_msg_page != 0) {
        return true;
    }

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
        if (env->msr_hv_synic_sint[i] != 0) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_hyperv_synic = {
    .name = "cpu/msr_hyperv_synic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_synic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU,
                             HV_SYNIC_SINT_COUNT),
        VMSTATE_END_OF_LIST()
    }
};
651
/* Subsection gate: Hyper-V synthetic timers, sent when any timer has a
   non-zero config or count. */
static bool hyperv_stimer_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
        if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_stimer = {
    .name = "cpu/msr_hyperv_stimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_stimer_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config,
                             X86CPU, HV_SYNIC_STIMER_COUNT),
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count,
                             X86CPU, HV_SYNIC_STIMER_COUNT),
        VMSTATE_END_OF_LIST()
    }
};
679
/* Subsection gate: AVX-512 state, sent when any opmask register, any
   ZMM upper half (quads 4-7), or (64-bit only) any of zmm16-zmm31 is
   non-zero. */
static bool avx512_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < NB_OPMASK_REGS; i++) {
        if (env->opmask_regs[i]) {
            return true;
        }
    }

    for (i = 0; i < CPU_NB_REGS; i++) {
#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
        if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
            ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
            return true;
        }
#ifdef TARGET_X86_64
        if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
            ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
            ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
            ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
            return true;
        }
#endif
    }

    return false;
}

static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = avx512_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
#endif
        VMSTATE_END_OF_LIST()
    }
};
725
/* Subsection gate: IA32_XSS MSR, sent only when non-zero. */
static bool xss_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->xss != 0;
}

static const VMStateDescription vmstate_xss = {
    .name = "cpu/xss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xss_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.xss, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
744
#ifdef TARGET_X86_64
/* Subsection gate: protection-key rights register (64-bit only),
   sent when non-zero. */
static bool pkru_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->pkru != 0;
}

static const VMStateDescription vmstate_pkru = {
    .name = "cpu/pkru",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkru_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT32(env.pkru, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif
765
/* Subsection gate: migrated TSC frequency; only sent when a frequency
   is known and the machine type opts in via save_tsc_khz. */
static bool tsc_khz_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    PCMachineClass *pcmc = PC_MACHINE_CLASS(mc);
    return env->tsc_khz && pcmc->save_tsc_khz;
}

static const VMStateDescription vmstate_tsc_khz = {
    .name = "cpu/tsc_khz",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_khz_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(env.tsc_khz, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
785
/* Subsection gate: MCG_EXT_CTL MSR; requires LMCE to be enabled on the
   CPU and the MSR to be non-zero. */
static bool mcg_ext_ctl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    return cpu->enable_lmce && env->mcg_ext_ctl;
}

static const VMStateDescription vmstate_mcg_ext_ctl = {
    .name = "cpu/mcg_ext_ctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mcg_ext_ctl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
803
/*
 * Top-level migration description for an x86 CPU.  The field order below
 * is the wire format: never reorder or remove entries, only append (with
 * a version bump) or add optional subsections.
 */
VMStateDescription vmstate_x86_cpu = {
    .name = "cpu",
    .version_id = 12,
    .minimum_version_id = 11,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
        VMSTATE_UINTTL(env.eip, X86CPU),
        VMSTATE_UINTTL(env.eflags, X86CPU),
        VMSTATE_UINT32(env.hflags, X86CPU),
        /* FPU */
        VMSTATE_UINT16(env.fpuc, X86CPU),
        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),

        VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg),

        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
        VMSTATE_SEGMENT(env.ldt, X86CPU),
        VMSTATE_SEGMENT(env.tr, X86CPU),
        VMSTATE_SEGMENT(env.gdt, X86CPU),
        VMSTATE_SEGMENT(env.idt, X86CPU),

        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),

        VMSTATE_UINTTL(env.cr[0], X86CPU),
        VMSTATE_UINTTL(env.cr[2], X86CPU),
        VMSTATE_UINTTL(env.cr[3], X86CPU),
        VMSTATE_UINTTL(env.cr[4], X86CPU),
        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
        /* MMU */
        VMSTATE_INT32(env.a20_mask, X86CPU),
        /* XMM */
        VMSTATE_UINT32(env.mxcsr, X86CPU),
        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_UINT64(env.star, X86CPU),
        VMSTATE_UINT64(env.lstar, X86CPU),
        VMSTATE_UINT64(env.cstar, X86CPU),
        VMSTATE_UINT64(env.fmask, X86CPU),
        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
#endif
        VMSTATE_UINT32(env.smbase, X86CPU),

        VMSTATE_UINT64(env.pat, X86CPU),
        VMSTATE_UINT32(env.hflags2, X86CPU),

        /* SVM */
        VMSTATE_UINT64(env.vm_hsave, X86CPU),
        VMSTATE_UINT64(env.vm_vmcb, X86CPU),
        VMSTATE_UINT64(env.tsc_offset, X86CPU),
        VMSTATE_UINT64(env.intercept, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_write, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_write, X86CPU),
        VMSTATE_UINT32(env.intercept_exceptions, X86CPU),
        VMSTATE_UINT8(env.v_tpr, X86CPU),
        /* MTRRs */
        VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11),
        VMSTATE_UINT64(env.mtrr_deftype, X86CPU),
        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
        /* KVM-related states */
        VMSTATE_INT32(env.interrupt_injected, X86CPU),
        VMSTATE_UINT32(env.mp_state, X86CPU),
        VMSTATE_UINT64(env.tsc, X86CPU),
        VMSTATE_INT32(env.exception_injected, X86CPU),
        VMSTATE_UINT8(env.soft_interrupt, X86CPU),
        VMSTATE_UINT8(env.nmi_injected, X86CPU),
        VMSTATE_UINT8(env.nmi_pending, X86CPU),
        VMSTATE_UINT8(env.has_error_code, X86CPU),
        VMSTATE_UINT32(env.sipi_vector, X86CPU),
        /* MCE */
        VMSTATE_UINT64(env.mcg_cap, X86CPU),
        VMSTATE_UINT64(env.mcg_status, X86CPU),
        VMSTATE_UINT64(env.mcg_ctl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4),
        /* rdtscp */
        VMSTATE_UINT64(env.tsc_aux, X86CPU),
        /* KVM pvclock msr */
        VMSTATE_UINT64(env.system_time_msr, X86CPU),
        VMSTATE_UINT64(env.wall_clock_msr, X86CPU),
        /* XSAVE related fields (only in streams of version >= 12) */
        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
        VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
        VMSTATE_END_OF_LIST()
        /* The above list is not sorted /wrt version numbers, watch out! */
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_async_pf_msr,
        &vmstate_pv_eoi_msr,
        &vmstate_steal_time_msr,
        &vmstate_fpop_ip_dp,
        &vmstate_msr_tsc_adjust,
        &vmstate_msr_tscdeadline,
        &vmstate_msr_ia32_misc_enable,
        &vmstate_msr_ia32_feature_control,
        &vmstate_msr_architectural_pmu,
        &vmstate_mpx,
        &vmstate_msr_hypercall_hypercall,
        &vmstate_msr_hyperv_vapic,
        &vmstate_msr_hyperv_time,
        &vmstate_msr_hyperv_crash,
        &vmstate_msr_hyperv_runtime,
        &vmstate_msr_hyperv_synic,
        &vmstate_msr_hyperv_stimer,
        &vmstate_avx512,
        &vmstate_xss,
        &vmstate_tsc_khz,
#ifdef TARGET_X86_64
        &vmstate_pkru,
#endif
        &vmstate_mcg_ext_ctl,
        NULL
    }
};
This page took 0.91069 seconds and 4 git commands to generate.