]>
Commit | Line | Data |
---|---|---|
b6a0aa05 | 1 | #include "qemu/osdep.h" |
33c11879 | 2 | #include "cpu.h" |
63c91552 | 3 | #include "exec/exec-all.h" |
0d09e41a | 4 | #include "hw/isa/isa.h" |
1e00b8d5 | 5 | #include "migration/cpu.h" |
a9dc68d9 | 6 | #include "kvm/hyperv.h" |
89a289c7 | 7 | #include "hw/i386/x86.h" |
a9dc68d9 | 8 | #include "kvm/kvm_i386.h" |
8dd3dca3 | 9 | |
9c17d615 | 10 | #include "sysemu/kvm.h" |
14a48c1d | 11 | #include "sysemu/tcg.h" |
8dd3dca3 | 12 | |
36f96c4b HZ |
13 | #include "qemu/error-report.h" |
14 | ||
66e6d55b JQ |
15 | static const VMStateDescription vmstate_segment = { |
16 | .name = "segment", | |
17 | .version_id = 1, | |
18 | .minimum_version_id = 1, | |
d49805ae | 19 | .fields = (VMStateField[]) { |
66e6d55b JQ |
20 | VMSTATE_UINT32(selector, SegmentCache), |
21 | VMSTATE_UINTTL(base, SegmentCache), | |
22 | VMSTATE_UINT32(limit, SegmentCache), | |
23 | VMSTATE_UINT32(flags, SegmentCache), | |
24 | VMSTATE_END_OF_LIST() | |
25 | } | |
26 | }; | |
27 | ||
0cb892aa JQ |
28 | #define VMSTATE_SEGMENT(_field, _state) { \ |
29 | .name = (stringify(_field)), \ | |
30 | .size = sizeof(SegmentCache), \ | |
31 | .vmsd = &vmstate_segment, \ | |
32 | .flags = VMS_STRUCT, \ | |
33 | .offset = offsetof(_state, _field) \ | |
34 | + type_check(SegmentCache,typeof_field(_state, _field)) \ | |
8dd3dca3 AJ |
35 | } |
36 | ||
0cb892aa JQ |
37 | #define VMSTATE_SEGMENT_ARRAY(_field, _state, _n) \ |
38 | VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache) | |
8dd3dca3 | 39 | |
fc3b0aa2 JQ |
40 | static const VMStateDescription vmstate_xmm_reg = { |
41 | .name = "xmm_reg", | |
42 | .version_id = 1, | |
43 | .minimum_version_id = 1, | |
d49805ae | 44 | .fields = (VMStateField[]) { |
19cbd87c EH |
45 | VMSTATE_UINT64(ZMM_Q(0), ZMMReg), |
46 | VMSTATE_UINT64(ZMM_Q(1), ZMMReg), | |
fc3b0aa2 JQ |
47 | VMSTATE_END_OF_LIST() |
48 | } | |
49 | }; | |
50 | ||
a03c3e90 PB |
51 | #define VMSTATE_XMM_REGS(_field, _state, _start) \ |
52 | VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \ | |
fa451874 | 53 | vmstate_xmm_reg, ZMMReg) |
fc3b0aa2 | 54 | |
b7711471 | 55 | /* YMMH format is the same as XMM, but for bits 128-255 */ |
f1665b21 SY |
56 | static const VMStateDescription vmstate_ymmh_reg = { |
57 | .name = "ymmh_reg", | |
58 | .version_id = 1, | |
59 | .minimum_version_id = 1, | |
d49805ae | 60 | .fields = (VMStateField[]) { |
19cbd87c EH |
61 | VMSTATE_UINT64(ZMM_Q(2), ZMMReg), |
62 | VMSTATE_UINT64(ZMM_Q(3), ZMMReg), | |
f1665b21 SY |
63 | VMSTATE_END_OF_LIST() |
64 | } | |
65 | }; | |
66 | ||
a03c3e90 PB |
67 | #define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v) \ |
68 | VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v, \ | |
fa451874 | 69 | vmstate_ymmh_reg, ZMMReg) |
f1665b21 | 70 | |
9aecd6f8 CP |
71 | static const VMStateDescription vmstate_zmmh_reg = { |
72 | .name = "zmmh_reg", | |
73 | .version_id = 1, | |
74 | .minimum_version_id = 1, | |
75 | .fields = (VMStateField[]) { | |
19cbd87c EH |
76 | VMSTATE_UINT64(ZMM_Q(4), ZMMReg), |
77 | VMSTATE_UINT64(ZMM_Q(5), ZMMReg), | |
78 | VMSTATE_UINT64(ZMM_Q(6), ZMMReg), | |
79 | VMSTATE_UINT64(ZMM_Q(7), ZMMReg), | |
9aecd6f8 CP |
80 | VMSTATE_END_OF_LIST() |
81 | } | |
82 | }; | |
83 | ||
a03c3e90 PB |
84 | #define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start) \ |
85 | VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \ | |
fa451874 | 86 | vmstate_zmmh_reg, ZMMReg) |
9aecd6f8 CP |
87 | |
88 | #ifdef TARGET_X86_64 | |
89 | static const VMStateDescription vmstate_hi16_zmm_reg = { | |
90 | .name = "hi16_zmm_reg", | |
91 | .version_id = 1, | |
92 | .minimum_version_id = 1, | |
93 | .fields = (VMStateField[]) { | |
19cbd87c EH |
94 | VMSTATE_UINT64(ZMM_Q(0), ZMMReg), |
95 | VMSTATE_UINT64(ZMM_Q(1), ZMMReg), | |
96 | VMSTATE_UINT64(ZMM_Q(2), ZMMReg), | |
97 | VMSTATE_UINT64(ZMM_Q(3), ZMMReg), | |
98 | VMSTATE_UINT64(ZMM_Q(4), ZMMReg), | |
99 | VMSTATE_UINT64(ZMM_Q(5), ZMMReg), | |
100 | VMSTATE_UINT64(ZMM_Q(6), ZMMReg), | |
101 | VMSTATE_UINT64(ZMM_Q(7), ZMMReg), | |
9aecd6f8 CP |
102 | VMSTATE_END_OF_LIST() |
103 | } | |
104 | }; | |
105 | ||
a03c3e90 PB |
106 | #define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start) \ |
107 | VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \ | |
fa451874 | 108 | vmstate_hi16_zmm_reg, ZMMReg) |
9aecd6f8 CP |
109 | #endif |
110 | ||
79e9ebeb LJ |
111 | static const VMStateDescription vmstate_bnd_regs = { |
112 | .name = "bnd_regs", | |
113 | .version_id = 1, | |
114 | .minimum_version_id = 1, | |
d49805ae | 115 | .fields = (VMStateField[]) { |
79e9ebeb LJ |
116 | VMSTATE_UINT64(lb, BNDReg), |
117 | VMSTATE_UINT64(ub, BNDReg), | |
118 | VMSTATE_END_OF_LIST() | |
119 | } | |
120 | }; | |
121 | ||
122 | #define VMSTATE_BND_REGS(_field, _state, _n) \ | |
123 | VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg) | |
124 | ||
216c07c3 JQ |
125 | static const VMStateDescription vmstate_mtrr_var = { |
126 | .name = "mtrr_var", | |
127 | .version_id = 1, | |
128 | .minimum_version_id = 1, | |
d49805ae | 129 | .fields = (VMStateField[]) { |
216c07c3 JQ |
130 | VMSTATE_UINT64(base, MTRRVar), |
131 | VMSTATE_UINT64(mask, MTRRVar), | |
132 | VMSTATE_END_OF_LIST() | |
133 | } | |
134 | }; | |
135 | ||
0cb892aa JQ |
136 | #define VMSTATE_MTRR_VARS(_field, _state, _n, _v) \ |
137 | VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar) | |
216c07c3 | 138 | |
ab808276 DDAG |
139 | typedef struct x86_FPReg_tmp { |
140 | FPReg *parent; | |
141 | uint64_t tmp_mant; | |
142 | uint16_t tmp_exp; | |
143 | } x86_FPReg_tmp; | |
144 | ||
db573d2c YZ |
145 | static void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f) |
146 | { | |
147 | CPU_LDoubleU temp; | |
148 | ||
149 | temp.d = f; | |
150 | *pmant = temp.l.lower; | |
151 | *pexp = temp.l.upper; | |
152 | } | |
153 | ||
154 | static floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper) | |
155 | { | |
156 | CPU_LDoubleU temp; | |
157 | ||
158 | temp.l.upper = upper; | |
159 | temp.l.lower = mant; | |
160 | return temp.d; | |
161 | } | |
162 | ||
44b1ff31 | 163 | static int fpreg_pre_save(void *opaque) |
3c8ce630 | 164 | { |
ab808276 | 165 | x86_FPReg_tmp *tmp = opaque; |
3c8ce630 | 166 | |
ab808276 DDAG |
167 | /* we save the real CPU data (in case of MMX usage only 'mant' |
168 | contains the MMX register */ | |
169 | cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d); | |
44b1ff31 DDAG |
170 | |
171 | return 0; | |
3c8ce630 JQ |
172 | } |
173 | ||
ab808276 | 174 | static int fpreg_post_load(void *opaque, int version) |
3c8ce630 | 175 | { |
ab808276 | 176 | x86_FPReg_tmp *tmp = opaque; |
2c21ee76 | 177 | |
ab808276 | 178 | tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp); |
2c21ee76 | 179 | return 0; |
3c8ce630 JQ |
180 | } |
181 | ||
ab808276 DDAG |
182 | static const VMStateDescription vmstate_fpreg_tmp = { |
183 | .name = "fpreg_tmp", | |
184 | .post_load = fpreg_post_load, | |
185 | .pre_save = fpreg_pre_save, | |
186 | .fields = (VMStateField[]) { | |
187 | VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp), | |
188 | VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp), | |
189 | VMSTATE_END_OF_LIST() | |
190 | } | |
191 | }; | |
192 | ||
193 | static const VMStateDescription vmstate_fpreg = { | |
0cb892aa | 194 | .name = "fpreg", |
ab808276 DDAG |
195 | .fields = (VMStateField[]) { |
196 | VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp), | |
197 | VMSTATE_END_OF_LIST() | |
198 | } | |
0cb892aa JQ |
199 | }; |
200 | ||
44b1ff31 | 201 | static int cpu_pre_save(void *opaque) |
8dd3dca3 | 202 | { |
f56e3a14 AF |
203 | X86CPU *cpu = opaque; |
204 | CPUX86State *env = &cpu->env; | |
0e607a80 | 205 | int i; |
e3126a5c | 206 | env->v_tpr = env->int_ctl & V_TPR_MASK; |
8dd3dca3 | 207 | /* FPU */ |
67b8f419 | 208 | env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; |
cdc0c58f | 209 | env->fptag_vmstate = 0; |
8dd3dca3 | 210 | for(i = 0; i < 8; i++) { |
cdc0c58f | 211 | env->fptag_vmstate |= ((!env->fptags[i]) << i); |
8dd3dca3 AJ |
212 | } |
213 | ||
60a902f1 | 214 | env->fpregs_format_vmstate = 0; |
3e47c249 OW |
215 | |
216 | /* | |
217 | * Real mode guest segments register DPL should be zero. | |
218 | * Older KVM version were setting it wrongly. | |
219 | * Fixing it will allow live migration to host with unrestricted guest | |
220 | * support (otherwise the migration will fail with invalid guest state | |
221 | * error). | |
222 | */ | |
223 | if (!(env->cr[0] & CR0_PE_MASK) && | |
224 | (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) { | |
225 | env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK); | |
226 | env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK); | |
227 | env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK); | |
228 | env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK); | |
229 | env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK); | |
230 | env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK); | |
231 | } | |
232 | ||
ebbfef2f | 233 | #ifdef CONFIG_KVM |
79a197ab LA |
234 | /* |
235 | * In case vCPU may have enabled VMX, we need to make sure kernel have | |
236 | * required capabilities in order to perform migration correctly: | |
237 | * | |
238 | * 1) We must be able to extract vCPU nested-state from KVM. | |
239 | * | |
240 | * 2) In case vCPU is running in guest-mode and it has a pending exception, | |
241 | * we must be able to determine if it's in a pending or injected state. | |
242 | * Note that in case KVM don't have required capability to do so, | |
243 | * a pending/injected exception will always appear as an | |
244 | * injected exception. | |
245 | */ | |
246 | if (kvm_enabled() && cpu_vmx_maybe_enabled(env) && | |
247 | (!env->nested_state || | |
248 | (!kvm_has_exception_payload() && (env->hflags & HF_GUEST_MASK) && | |
249 | env->exception_injected))) { | |
250 | error_report("Guest maybe enabled nested virtualization but kernel " | |
251 | "does not support required capabilities to save vCPU " | |
252 | "nested state"); | |
ebbfef2f LA |
253 | return -EINVAL; |
254 | } | |
255 | #endif | |
256 | ||
fd13f23b LA |
257 | /* |
258 | * When vCPU is running L2 and exception is still pending, | |
259 | * it can potentially be intercepted by L1 hypervisor. | |
260 | * In contrast to an injected exception which cannot be | |
261 | * intercepted anymore. | |
262 | * | |
263 | * Furthermore, when a L2 exception is intercepted by L1 | |
7332a4a4 | 264 | * hypervisor, its exception payload (CR2/DR6 on #PF/#DB) |
fd13f23b LA |
265 | * should not be set yet in the respective vCPU register. |
266 | * Thus, in case an exception is pending, it is | |
267 | * important to save the exception payload seperately. | |
268 | * | |
269 | * Therefore, if an exception is not in a pending state | |
270 | * or vCPU is not in guest-mode, it is not important to | |
271 | * distinguish between a pending and injected exception | |
272 | * and we don't need to store seperately the exception payload. | |
273 | * | |
7332a4a4 | 274 | * In order to preserve better backwards-compatible migration, |
fd13f23b | 275 | * convert a pending exception to an injected exception in |
7332a4a4 | 276 | * case it is not important to distinguish between them |
fd13f23b LA |
277 | * as described above. |
278 | */ | |
279 | if (env->exception_pending && !(env->hflags & HF_GUEST_MASK)) { | |
280 | env->exception_pending = 0; | |
281 | env->exception_injected = 1; | |
282 | ||
283 | if (env->exception_has_payload) { | |
284 | if (env->exception_nr == EXCP01_DB) { | |
285 | env->dr[6] = env->exception_payload; | |
286 | } else if (env->exception_nr == EXCP0E_PAGE) { | |
287 | env->cr[2] = env->exception_payload; | |
288 | } | |
289 | } | |
290 | } | |
291 | ||
44b1ff31 | 292 | return 0; |
c4c38c8c JQ |
293 | } |
294 | ||
468f6581 JQ |
295 | static int cpu_post_load(void *opaque, int version_id) |
296 | { | |
f56e3a14 | 297 | X86CPU *cpu = opaque; |
75a34036 | 298 | CPUState *cs = CPU(cpu); |
f56e3a14 | 299 | CPUX86State *env = &cpu->env; |
468f6581 JQ |
300 | int i; |
301 | ||
36f96c4b HZ |
302 | if (env->tsc_khz && env->user_tsc_khz && |
303 | env->tsc_khz != env->user_tsc_khz) { | |
304 | error_report("Mismatch between user-specified TSC frequency and " | |
305 | "migrated TSC frequency"); | |
306 | return -EINVAL; | |
307 | } | |
308 | ||
46baa900 DDAG |
309 | if (env->fpregs_format_vmstate) { |
310 | error_report("Unsupported old non-softfloat CPU state"); | |
311 | return -EINVAL; | |
312 | } | |
444ba679 OW |
313 | /* |
314 | * Real mode guest segments register DPL should be zero. | |
315 | * Older KVM version were setting it wrongly. | |
316 | * Fixing it will allow live migration from such host that don't have | |
317 | * restricted guest support to a host with unrestricted guest support | |
318 | * (otherwise the migration will fail with invalid guest state | |
319 | * error). | |
320 | */ | |
321 | if (!(env->cr[0] & CR0_PE_MASK) && | |
322 | (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) { | |
323 | env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK); | |
324 | env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK); | |
325 | env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK); | |
326 | env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK); | |
327 | env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK); | |
328 | env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK); | |
329 | } | |
330 | ||
7125c937 PB |
331 | /* Older versions of QEMU incorrectly used CS.DPL as the CPL when |
332 | * running under KVM. This is wrong for conforming code segments. | |
333 | * Luckily, in our implementation the CPL field of hflags is redundant | |
334 | * and we can get the right value from the SS descriptor privilege level. | |
335 | */ | |
336 | env->hflags &= ~HF_CPL_MASK; | |
337 | env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; | |
338 | ||
ebbfef2f LA |
339 | #ifdef CONFIG_KVM |
340 | if ((env->hflags & HF_GUEST_MASK) && | |
341 | (!env->nested_state || | |
342 | !(env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE))) { | |
343 | error_report("vCPU set in guest-mode inconsistent with " | |
344 | "migrated kernel nested state"); | |
345 | return -EINVAL; | |
346 | } | |
347 | #endif | |
348 | ||
fd13f23b LA |
349 | /* |
350 | * There are cases that we can get valid exception_nr with both | |
351 | * exception_pending and exception_injected being cleared. | |
352 | * This can happen in one of the following scenarios: | |
353 | * 1) Source is older QEMU without KVM_CAP_EXCEPTION_PAYLOAD support. | |
354 | * 2) Source is running on kernel without KVM_CAP_EXCEPTION_PAYLOAD support. | |
355 | * 3) "cpu/exception_info" subsection not sent because there is no exception | |
356 | * pending or guest wasn't running L2 (See comment in cpu_pre_save()). | |
357 | * | |
358 | * In those cases, we can just deduce that a valid exception_nr means | |
359 | * we can treat the exception as already injected. | |
360 | */ | |
361 | if ((env->exception_nr != -1) && | |
362 | !env->exception_pending && !env->exception_injected) { | |
363 | env->exception_injected = 1; | |
364 | } | |
365 | ||
468f6581 JQ |
366 | env->fpstt = (env->fpus_vmstate >> 11) & 7; |
367 | env->fpus = env->fpus_vmstate & ~0x3800; | |
368 | env->fptag_vmstate ^= 0xff; | |
369 | for(i = 0; i < 8; i++) { | |
370 | env->fptags[i] = (env->fptag_vmstate >> i) & 1; | |
371 | } | |
1d8ad165 | 372 | if (tcg_enabled()) { |
79c664f6 | 373 | target_ulong dr7; |
1d8ad165 YZ |
374 | update_fp_status(env); |
375 | update_mxcsr_status(env); | |
468f6581 | 376 | |
79c664f6 YZ |
377 | cpu_breakpoint_remove_all(cs, BP_CPU); |
378 | cpu_watchpoint_remove_all(cs, BP_CPU); | |
379 | ||
93d00d0f RH |
380 | /* Indicate all breakpoints disabled, as they are, then |
381 | let the helper re-enable them. */ | |
79c664f6 | 382 | dr7 = env->dr[7]; |
93d00d0f RH |
383 | env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK); |
384 | cpu_x86_update_dr7(env, dr7); | |
428065ce | 385 | } |
d10eb08f | 386 | tlb_flush(cs); |
1e7fbc6d | 387 | return 0; |
468f6581 JQ |
388 | } |
389 | ||
f6584ee2 GN |
390 | static bool async_pf_msr_needed(void *opaque) |
391 | { | |
f56e3a14 | 392 | X86CPU *cpu = opaque; |
f6584ee2 | 393 | |
f56e3a14 | 394 | return cpu->env.async_pf_en_msr != 0; |
f6584ee2 GN |
395 | } |
396 | ||
db5daafa VK |
397 | static bool async_pf_int_msr_needed(void *opaque) |
398 | { | |
399 | X86CPU *cpu = opaque; | |
400 | ||
401 | return cpu->env.async_pf_int_msr != 0; | |
402 | } | |
403 | ||
bc9a839d MT |
404 | static bool pv_eoi_msr_needed(void *opaque) |
405 | { | |
f56e3a14 | 406 | X86CPU *cpu = opaque; |
bc9a839d | 407 | |
f56e3a14 | 408 | return cpu->env.pv_eoi_en_msr != 0; |
bc9a839d MT |
409 | } |
410 | ||
917367aa MT |
411 | static bool steal_time_msr_needed(void *opaque) |
412 | { | |
0e503577 | 413 | X86CPU *cpu = opaque; |
917367aa | 414 | |
0e503577 | 415 | return cpu->env.steal_time_msr != 0; |
917367aa MT |
416 | } |
417 | ||
fd13f23b LA |
418 | static bool exception_info_needed(void *opaque) |
419 | { | |
420 | X86CPU *cpu = opaque; | |
421 | CPUX86State *env = &cpu->env; | |
422 | ||
423 | /* | |
424 | * It is important to save exception-info only in case | |
7332a4a4 | 425 | * we need to distinguish between a pending and injected |
fd13f23b LA |
426 | * exception. Which is only required in case there is a |
427 | * pending exception and vCPU is running L2. | |
428 | * For more info, refer to comment in cpu_pre_save(). | |
429 | */ | |
430 | return env->exception_pending && (env->hflags & HF_GUEST_MASK); | |
431 | } | |
432 | ||
433 | static const VMStateDescription vmstate_exception_info = { | |
434 | .name = "cpu/exception_info", | |
435 | .version_id = 1, | |
436 | .minimum_version_id = 1, | |
437 | .needed = exception_info_needed, | |
438 | .fields = (VMStateField[]) { | |
439 | VMSTATE_UINT8(env.exception_pending, X86CPU), | |
440 | VMSTATE_UINT8(env.exception_injected, X86CPU), | |
441 | VMSTATE_UINT8(env.exception_has_payload, X86CPU), | |
442 | VMSTATE_UINT64(env.exception_payload, X86CPU), | |
443 | VMSTATE_END_OF_LIST() | |
444 | } | |
445 | }; | |
446 | ||
d645e132 MT |
447 | /* Poll control MSR enabled by default */ |
448 | static bool poll_control_msr_needed(void *opaque) | |
449 | { | |
450 | X86CPU *cpu = opaque; | |
451 | ||
452 | return cpu->env.poll_control_msr != 1; | |
453 | } | |
454 | ||
917367aa MT |
455 | static const VMStateDescription vmstate_steal_time_msr = { |
456 | .name = "cpu/steal_time_msr", | |
457 | .version_id = 1, | |
458 | .minimum_version_id = 1, | |
5cd8cada | 459 | .needed = steal_time_msr_needed, |
d49805ae | 460 | .fields = (VMStateField[]) { |
0e503577 | 461 | VMSTATE_UINT64(env.steal_time_msr, X86CPU), |
917367aa MT |
462 | VMSTATE_END_OF_LIST() |
463 | } | |
464 | }; | |
465 | ||
f6584ee2 GN |
466 | static const VMStateDescription vmstate_async_pf_msr = { |
467 | .name = "cpu/async_pf_msr", | |
468 | .version_id = 1, | |
469 | .minimum_version_id = 1, | |
5cd8cada | 470 | .needed = async_pf_msr_needed, |
d49805ae | 471 | .fields = (VMStateField[]) { |
f56e3a14 | 472 | VMSTATE_UINT64(env.async_pf_en_msr, X86CPU), |
f6584ee2 GN |
473 | VMSTATE_END_OF_LIST() |
474 | } | |
475 | }; | |
476 | ||
db5daafa VK |
477 | static const VMStateDescription vmstate_async_pf_int_msr = { |
478 | .name = "cpu/async_pf_int_msr", | |
479 | .version_id = 1, | |
480 | .minimum_version_id = 1, | |
481 | .needed = async_pf_int_msr_needed, | |
482 | .fields = (VMStateField[]) { | |
483 | VMSTATE_UINT64(env.async_pf_int_msr, X86CPU), | |
484 | VMSTATE_END_OF_LIST() | |
485 | } | |
486 | }; | |
487 | ||
bc9a839d MT |
488 | static const VMStateDescription vmstate_pv_eoi_msr = { |
489 | .name = "cpu/async_pv_eoi_msr", | |
490 | .version_id = 1, | |
491 | .minimum_version_id = 1, | |
5cd8cada | 492 | .needed = pv_eoi_msr_needed, |
d49805ae | 493 | .fields = (VMStateField[]) { |
f56e3a14 | 494 | VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU), |
bc9a839d MT |
495 | VMSTATE_END_OF_LIST() |
496 | } | |
497 | }; | |
498 | ||
d645e132 MT |
499 | static const VMStateDescription vmstate_poll_control_msr = { |
500 | .name = "cpu/poll_control_msr", | |
501 | .version_id = 1, | |
502 | .minimum_version_id = 1, | |
503 | .needed = poll_control_msr_needed, | |
504 | .fields = (VMStateField[]) { | |
505 | VMSTATE_UINT64(env.poll_control_msr, X86CPU), | |
506 | VMSTATE_END_OF_LIST() | |
507 | } | |
508 | }; | |
509 | ||
42cc8fa6 JK |
510 | static bool fpop_ip_dp_needed(void *opaque) |
511 | { | |
f56e3a14 AF |
512 | X86CPU *cpu = opaque; |
513 | CPUX86State *env = &cpu->env; | |
42cc8fa6 JK |
514 | |
515 | return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0; | |
516 | } | |
517 | ||
518 | static const VMStateDescription vmstate_fpop_ip_dp = { | |
519 | .name = "cpu/fpop_ip_dp", | |
520 | .version_id = 1, | |
521 | .minimum_version_id = 1, | |
5cd8cada | 522 | .needed = fpop_ip_dp_needed, |
d49805ae | 523 | .fields = (VMStateField[]) { |
f56e3a14 AF |
524 | VMSTATE_UINT16(env.fpop, X86CPU), |
525 | VMSTATE_UINT64(env.fpip, X86CPU), | |
526 | VMSTATE_UINT64(env.fpdp, X86CPU), | |
42cc8fa6 JK |
527 | VMSTATE_END_OF_LIST() |
528 | } | |
529 | }; | |
530 | ||
f28558d3 WA |
531 | static bool tsc_adjust_needed(void *opaque) |
532 | { | |
f56e3a14 AF |
533 | X86CPU *cpu = opaque; |
534 | CPUX86State *env = &cpu->env; | |
f28558d3 WA |
535 | |
536 | return env->tsc_adjust != 0; | |
537 | } | |
538 | ||
539 | static const VMStateDescription vmstate_msr_tsc_adjust = { | |
540 | .name = "cpu/msr_tsc_adjust", | |
541 | .version_id = 1, | |
542 | .minimum_version_id = 1, | |
5cd8cada | 543 | .needed = tsc_adjust_needed, |
d49805ae | 544 | .fields = (VMStateField[]) { |
f56e3a14 | 545 | VMSTATE_UINT64(env.tsc_adjust, X86CPU), |
f28558d3 WA |
546 | VMSTATE_END_OF_LIST() |
547 | } | |
548 | }; | |
549 | ||
e13713db LA |
550 | static bool msr_smi_count_needed(void *opaque) |
551 | { | |
552 | X86CPU *cpu = opaque; | |
553 | CPUX86State *env = &cpu->env; | |
554 | ||
990e0be2 | 555 | return cpu->migrate_smi_count && env->msr_smi_count != 0; |
e13713db LA |
556 | } |
557 | ||
558 | static const VMStateDescription vmstate_msr_smi_count = { | |
559 | .name = "cpu/msr_smi_count", | |
560 | .version_id = 1, | |
561 | .minimum_version_id = 1, | |
562 | .needed = msr_smi_count_needed, | |
563 | .fields = (VMStateField[]) { | |
564 | VMSTATE_UINT64(env.msr_smi_count, X86CPU), | |
565 | VMSTATE_END_OF_LIST() | |
566 | } | |
567 | }; | |
568 | ||
aa82ba54 LJ |
569 | static bool tscdeadline_needed(void *opaque) |
570 | { | |
f56e3a14 AF |
571 | X86CPU *cpu = opaque; |
572 | CPUX86State *env = &cpu->env; | |
aa82ba54 LJ |
573 | |
574 | return env->tsc_deadline != 0; | |
575 | } | |
576 | ||
577 | static const VMStateDescription vmstate_msr_tscdeadline = { | |
578 | .name = "cpu/msr_tscdeadline", | |
579 | .version_id = 1, | |
580 | .minimum_version_id = 1, | |
5cd8cada | 581 | .needed = tscdeadline_needed, |
d49805ae | 582 | .fields = (VMStateField[]) { |
f56e3a14 | 583 | VMSTATE_UINT64(env.tsc_deadline, X86CPU), |
aa82ba54 LJ |
584 | VMSTATE_END_OF_LIST() |
585 | } | |
586 | }; | |
587 | ||
21e87c46 AK |
588 | static bool misc_enable_needed(void *opaque) |
589 | { | |
f56e3a14 AF |
590 | X86CPU *cpu = opaque; |
591 | CPUX86State *env = &cpu->env; | |
21e87c46 AK |
592 | |
593 | return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT; | |
594 | } | |
595 | ||
0779caeb ACL |
596 | static bool feature_control_needed(void *opaque) |
597 | { | |
598 | X86CPU *cpu = opaque; | |
599 | CPUX86State *env = &cpu->env; | |
600 | ||
601 | return env->msr_ia32_feature_control != 0; | |
602 | } | |
603 | ||
21e87c46 AK |
604 | static const VMStateDescription vmstate_msr_ia32_misc_enable = { |
605 | .name = "cpu/msr_ia32_misc_enable", | |
606 | .version_id = 1, | |
607 | .minimum_version_id = 1, | |
5cd8cada | 608 | .needed = misc_enable_needed, |
d49805ae | 609 | .fields = (VMStateField[]) { |
f56e3a14 | 610 | VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU), |
21e87c46 AK |
611 | VMSTATE_END_OF_LIST() |
612 | } | |
613 | }; | |
614 | ||
0779caeb ACL |
615 | static const VMStateDescription vmstate_msr_ia32_feature_control = { |
616 | .name = "cpu/msr_ia32_feature_control", | |
617 | .version_id = 1, | |
618 | .minimum_version_id = 1, | |
5cd8cada | 619 | .needed = feature_control_needed, |
d49805ae | 620 | .fields = (VMStateField[]) { |
0779caeb ACL |
621 | VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU), |
622 | VMSTATE_END_OF_LIST() | |
623 | } | |
624 | }; | |
625 | ||
0d894367 PB |
626 | static bool pmu_enable_needed(void *opaque) |
627 | { | |
628 | X86CPU *cpu = opaque; | |
629 | CPUX86State *env = &cpu->env; | |
630 | int i; | |
631 | ||
632 | if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl || | |
633 | env->msr_global_status || env->msr_global_ovf_ctrl) { | |
634 | return true; | |
635 | } | |
636 | for (i = 0; i < MAX_FIXED_COUNTERS; i++) { | |
637 | if (env->msr_fixed_counters[i]) { | |
638 | return true; | |
639 | } | |
640 | } | |
641 | for (i = 0; i < MAX_GP_COUNTERS; i++) { | |
642 | if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) { | |
643 | return true; | |
644 | } | |
645 | } | |
646 | ||
647 | return false; | |
648 | } | |
649 | ||
650 | static const VMStateDescription vmstate_msr_architectural_pmu = { | |
651 | .name = "cpu/msr_architectural_pmu", | |
652 | .version_id = 1, | |
653 | .minimum_version_id = 1, | |
5cd8cada | 654 | .needed = pmu_enable_needed, |
d49805ae | 655 | .fields = (VMStateField[]) { |
0d894367 PB |
656 | VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU), |
657 | VMSTATE_UINT64(env.msr_global_ctrl, X86CPU), | |
658 | VMSTATE_UINT64(env.msr_global_status, X86CPU), | |
659 | VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU), | |
660 | VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS), | |
661 | VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS), | |
662 | VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS), | |
663 | VMSTATE_END_OF_LIST() | |
664 | } | |
665 | }; | |
666 | ||
79e9ebeb LJ |
667 | static bool mpx_needed(void *opaque) |
668 | { | |
669 | X86CPU *cpu = opaque; | |
670 | CPUX86State *env = &cpu->env; | |
671 | unsigned int i; | |
672 | ||
673 | for (i = 0; i < 4; i++) { | |
674 | if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) { | |
675 | return true; | |
676 | } | |
677 | } | |
678 | ||
679 | if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) { | |
680 | return true; | |
681 | } | |
682 | ||
683 | return !!env->msr_bndcfgs; | |
684 | } | |
685 | ||
686 | static const VMStateDescription vmstate_mpx = { | |
687 | .name = "cpu/mpx", | |
688 | .version_id = 1, | |
689 | .minimum_version_id = 1, | |
5cd8cada | 690 | .needed = mpx_needed, |
d49805ae | 691 | .fields = (VMStateField[]) { |
79e9ebeb LJ |
692 | VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4), |
693 | VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU), | |
694 | VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU), | |
695 | VMSTATE_UINT64(env.msr_bndcfgs, X86CPU), | |
696 | VMSTATE_END_OF_LIST() | |
697 | } | |
698 | }; | |
699 | ||
1c90ef26 VR |
700 | static bool hyperv_hypercall_enable_needed(void *opaque) |
701 | { | |
702 | X86CPU *cpu = opaque; | |
703 | CPUX86State *env = &cpu->env; | |
704 | ||
705 | return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0; | |
706 | } | |
707 | ||
816d20c9 | 708 | static const VMStateDescription vmstate_msr_hyperv_hypercall = { |
1c90ef26 VR |
709 | .name = "cpu/msr_hyperv_hypercall", |
710 | .version_id = 1, | |
711 | .minimum_version_id = 1, | |
5cd8cada | 712 | .needed = hyperv_hypercall_enable_needed, |
d49805ae | 713 | .fields = (VMStateField[]) { |
1c90ef26 | 714 | VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU), |
466e6e9d | 715 | VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU), |
1c90ef26 VR |
716 | VMSTATE_END_OF_LIST() |
717 | } | |
718 | }; | |
719 | ||
5ef68987 VR |
720 | static bool hyperv_vapic_enable_needed(void *opaque) |
721 | { | |
722 | X86CPU *cpu = opaque; | |
723 | CPUX86State *env = &cpu->env; | |
724 | ||
725 | return env->msr_hv_vapic != 0; | |
726 | } | |
727 | ||
728 | static const VMStateDescription vmstate_msr_hyperv_vapic = { | |
729 | .name = "cpu/msr_hyperv_vapic", | |
730 | .version_id = 1, | |
731 | .minimum_version_id = 1, | |
5cd8cada | 732 | .needed = hyperv_vapic_enable_needed, |
d49805ae | 733 | .fields = (VMStateField[]) { |
5ef68987 VR |
734 | VMSTATE_UINT64(env.msr_hv_vapic, X86CPU), |
735 | VMSTATE_END_OF_LIST() | |
736 | } | |
737 | }; | |
738 | ||
48a5f3bc VR |
739 | static bool hyperv_time_enable_needed(void *opaque) |
740 | { | |
741 | X86CPU *cpu = opaque; | |
742 | CPUX86State *env = &cpu->env; | |
743 | ||
744 | return env->msr_hv_tsc != 0; | |
745 | } | |
746 | ||
747 | static const VMStateDescription vmstate_msr_hyperv_time = { | |
748 | .name = "cpu/msr_hyperv_time", | |
749 | .version_id = 1, | |
750 | .minimum_version_id = 1, | |
5cd8cada | 751 | .needed = hyperv_time_enable_needed, |
d49805ae | 752 | .fields = (VMStateField[]) { |
48a5f3bc VR |
753 | VMSTATE_UINT64(env.msr_hv_tsc, X86CPU), |
754 | VMSTATE_END_OF_LIST() | |
755 | } | |
756 | }; | |
757 | ||
f2a53c9e AS |
758 | static bool hyperv_crash_enable_needed(void *opaque) |
759 | { | |
760 | X86CPU *cpu = opaque; | |
761 | CPUX86State *env = &cpu->env; | |
762 | int i; | |
763 | ||
5e953812 | 764 | for (i = 0; i < HV_CRASH_PARAMS; i++) { |
f2a53c9e AS |
765 | if (env->msr_hv_crash_params[i]) { |
766 | return true; | |
767 | } | |
768 | } | |
769 | return false; | |
770 | } | |
771 | ||
772 | static const VMStateDescription vmstate_msr_hyperv_crash = { | |
773 | .name = "cpu/msr_hyperv_crash", | |
774 | .version_id = 1, | |
775 | .minimum_version_id = 1, | |
776 | .needed = hyperv_crash_enable_needed, | |
777 | .fields = (VMStateField[]) { | |
5e953812 | 778 | VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, X86CPU, HV_CRASH_PARAMS), |
f2a53c9e AS |
779 | VMSTATE_END_OF_LIST() |
780 | } | |
781 | }; | |
782 | ||
46eb8f98 AS |
783 | static bool hyperv_runtime_enable_needed(void *opaque) |
784 | { | |
785 | X86CPU *cpu = opaque; | |
786 | CPUX86State *env = &cpu->env; | |
787 | ||
2d384d7c | 788 | if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_RUNTIME)) { |
51227875 ZY |
789 | return false; |
790 | } | |
791 | ||
46eb8f98 AS |
792 | return env->msr_hv_runtime != 0; |
793 | } | |
794 | ||
795 | static const VMStateDescription vmstate_msr_hyperv_runtime = { | |
796 | .name = "cpu/msr_hyperv_runtime", | |
797 | .version_id = 1, | |
798 | .minimum_version_id = 1, | |
799 | .needed = hyperv_runtime_enable_needed, | |
800 | .fields = (VMStateField[]) { | |
801 | VMSTATE_UINT64(env.msr_hv_runtime, X86CPU), | |
802 | VMSTATE_END_OF_LIST() | |
803 | } | |
804 | }; | |
805 | ||
866eea9a AS |
806 | static bool hyperv_synic_enable_needed(void *opaque) |
807 | { | |
808 | X86CPU *cpu = opaque; | |
809 | CPUX86State *env = &cpu->env; | |
810 | int i; | |
811 | ||
812 | if (env->msr_hv_synic_control != 0 || | |
813 | env->msr_hv_synic_evt_page != 0 || | |
814 | env->msr_hv_synic_msg_page != 0) { | |
815 | return true; | |
816 | } | |
817 | ||
818 | for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) { | |
819 | if (env->msr_hv_synic_sint[i] != 0) { | |
820 | return true; | |
821 | } | |
822 | } | |
823 | ||
824 | return false; | |
825 | } | |
826 | ||
606c34bf RK |
827 | static int hyperv_synic_post_load(void *opaque, int version_id) |
828 | { | |
829 | X86CPU *cpu = opaque; | |
830 | hyperv_x86_synic_update(cpu); | |
831 | return 0; | |
832 | } | |
833 | ||
866eea9a AS |
/*
 * Optional "cpu/msr_hyperv_synic" subsection (SynIC MSR state).  Field order
 * is part of the migration stream format — do not reorder.
 */
static const VMStateDescription vmstate_msr_hyperv_synic = {
    .name = "cpu/msr_hyperv_synic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_synic_enable_needed,
    .post_load = hyperv_synic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU, HV_SINT_COUNT),
        VMSTATE_END_OF_LIST()
    }
};
848 | ||
ff99aa64 AS |
849 | static bool hyperv_stimer_enable_needed(void *opaque) |
850 | { | |
851 | X86CPU *cpu = opaque; | |
852 | CPUX86State *env = &cpu->env; | |
853 | int i; | |
854 | ||
855 | for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) { | |
856 | if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) { | |
857 | return true; | |
858 | } | |
859 | } | |
860 | return false; | |
861 | } | |
862 | ||
/*
 * Optional "cpu/msr_hyperv_stimer" subsection (synthetic timer MSRs).
 * Field order is part of the migration stream format — do not reorder.
 */
static const VMStateDescription vmstate_msr_hyperv_stimer = {
    .name = "cpu/msr_hyperv_stimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_stimer_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, X86CPU,
                             HV_STIMER_COUNT),
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, X86CPU, HV_STIMER_COUNT),
        VMSTATE_END_OF_LIST()
    }
};
875 | ||
ba6a4fd9 VK |
876 | static bool hyperv_reenlightenment_enable_needed(void *opaque) |
877 | { | |
878 | X86CPU *cpu = opaque; | |
879 | CPUX86State *env = &cpu->env; | |
880 | ||
881 | return env->msr_hv_reenlightenment_control != 0 || | |
882 | env->msr_hv_tsc_emulation_control != 0 || | |
883 | env->msr_hv_tsc_emulation_status != 0; | |
884 | } | |
885 | ||
561dbb41 VK |
/*
 * Post-load check for "cpu/msr_hyperv_reenlightenment": fail the migration
 * when the guest had re-enlightenment enabled but no fixed TSC frequency
 * was configured on the destination.
 */
static int hyperv_reenlightenment_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    /*
     * KVM doesn't fully support re-enlightenment notifications so we need to
     * make sure TSC frequency doesn't change upon migration.
     */
    if ((env->msr_hv_reenlightenment_control & HV_REENLIGHTENMENT_ENABLE_BIT) &&
        !env->user_tsc_khz) {
        error_report("Guest enabled re-enlightenment notifications, "
                     "'tsc-frequency=' has to be specified");
        return -EINVAL;
    }

    return 0;
}
904 | ||
ba6a4fd9 VK |
/*
 * Optional "cpu/msr_hyperv_reenlightenment" subsection.  Field order is part
 * of the migration stream format — do not reorder.
 */
static const VMStateDescription vmstate_msr_hyperv_reenlightenment = {
    .name = "cpu/msr_hyperv_reenlightenment",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_reenlightenment_enable_needed,
    .post_load = hyperv_reenlightenment_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_reenlightenment_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_status, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
918 | ||
9aecd6f8 CP |
/*
 * Subsection guard for "cpu/avx512": true when any opmask register, any
 * upper-half ZMM quadword (ZMM_Q 4..7), or — on 64-bit targets — any part
 * of ZMM registers 16..31 is nonzero and therefore needs migrating.
 */
static bool avx512_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < NB_OPMASK_REGS; i++) {
        if (env->opmask_regs[i]) {
            return true;
        }
    }

    for (i = 0; i < CPU_NB_REGS; i++) {
/* Shorthand: quadword `field` of vector register `reg`. */
#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
        if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
            ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
            return true;
        }
#ifdef TARGET_X86_64
        if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
            ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
            ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
            ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
            return true;
        }
#endif
    }

    return false;
}
949 | ||
/*
 * Optional "cpu/avx512" subsection: opmask registers plus the ZMM state not
 * covered by the XMM/YMM fields of the main section.  Field order is part of
 * the migration stream format — do not reorder.
 */
static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = avx512_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
#endif
        VMSTATE_END_OF_LIST()
    }
};
964 | ||
18cd2c17 WL |
965 | static bool xss_needed(void *opaque) |
966 | { | |
967 | X86CPU *cpu = opaque; | |
968 | CPUX86State *env = &cpu->env; | |
969 | ||
970 | return env->xss != 0; | |
971 | } | |
972 | ||
/* Optional "cpu/xss" subsection; sent only when xss_needed() is true. */
static const VMStateDescription vmstate_xss = {
    .name = "cpu/xss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xss_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.xss, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
983 | ||
65087997 TX |
984 | static bool umwait_needed(void *opaque) |
985 | { | |
986 | X86CPU *cpu = opaque; | |
987 | CPUX86State *env = &cpu->env; | |
988 | ||
989 | return env->umwait != 0; | |
990 | } | |
991 | ||
/* Optional "cpu/umwait" subsection; sent only when umwait_needed() is true. */
static const VMStateDescription vmstate_umwait = {
    .name = "cpu/umwait",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = umwait_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.umwait, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1002 | ||
f74eefe0 HH |
1003 | static bool pkru_needed(void *opaque) |
1004 | { | |
1005 | X86CPU *cpu = opaque; | |
1006 | CPUX86State *env = &cpu->env; | |
1007 | ||
1008 | return env->pkru != 0; | |
1009 | } | |
1010 | ||
/* Optional "cpu/pkru" subsection; sent only when pkru_needed() is true. */
static const VMStateDescription vmstate_pkru = {
    .name = "cpu/pkru",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkru_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT32(env.pkru, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
e7e7bdab PB |
1021 | |
1022 | static bool pkrs_needed(void *opaque) | |
1023 | { | |
1024 | X86CPU *cpu = opaque; | |
1025 | CPUX86State *env = &cpu->env; | |
1026 | ||
1027 | return env->pkrs != 0; | |
1028 | } | |
1029 | ||
/* Optional "cpu/pkrs" subsection; sent only when pkrs_needed() is true. */
static const VMStateDescription vmstate_pkrs = {
    .name = "cpu/pkrs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkrs_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT32(env.pkrs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
f74eefe0 | 1040 | |
36f96c4b HZ |
1041 | static bool tsc_khz_needed(void *opaque) |
1042 | { | |
1043 | X86CPU *cpu = opaque; | |
1044 | CPUX86State *env = &cpu->env; | |
1045 | MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); | |
2f34ebf2 LM |
1046 | X86MachineClass *x86mc = X86_MACHINE_CLASS(mc); |
1047 | return env->tsc_khz && x86mc->save_tsc_khz; | |
36f96c4b HZ |
1048 | } |
1049 | ||
/* Optional "cpu/tsc_khz" subsection; sent only when tsc_khz_needed() is true. */
static const VMStateDescription vmstate_tsc_khz = {
    .name = "cpu/tsc_khz",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_khz_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(env.tsc_khz, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1060 | ||
ebbfef2f LA |
1061 | #ifdef CONFIG_KVM |
1062 | ||
1063 | static bool vmx_vmcs12_needed(void *opaque) | |
1064 | { | |
1065 | struct kvm_nested_state *nested_state = opaque; | |
1066 | return (nested_state->size > | |
1067 | offsetof(struct kvm_nested_state, data.vmx[0].vmcs12)); | |
1068 | } | |
1069 | ||
/* Sub-subsection carrying the raw VMX vmcs12 blob, when present. */
static const VMStateDescription vmstate_vmx_vmcs12 = {
    .name = "cpu/kvm_nested_state/vmx/vmcs12",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_vmcs12_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(data.vmx[0].vmcs12,
                            struct kvm_nested_state,
                            KVM_STATE_NESTED_VMX_VMCS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};
1082 | ||
1083 | static bool vmx_shadow_vmcs12_needed(void *opaque) | |
1084 | { | |
1085 | struct kvm_nested_state *nested_state = opaque; | |
1086 | return (nested_state->size > | |
1087 | offsetof(struct kvm_nested_state, data.vmx[0].shadow_vmcs12)); | |
1088 | } | |
1089 | ||
/* Sub-subsection carrying the raw VMX shadow vmcs12 blob, when present. */
static const VMStateDescription vmstate_vmx_shadow_vmcs12 = {
    .name = "cpu/kvm_nested_state/vmx/shadow_vmcs12",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_shadow_vmcs12_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(data.vmx[0].shadow_vmcs12,
                            struct kvm_nested_state,
                            KVM_STATE_NESTED_VMX_VMCS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};
1102 | ||
1103 | static bool vmx_nested_state_needed(void *opaque) | |
1104 | { | |
1105 | struct kvm_nested_state *nested_state = opaque; | |
1106 | ||
ec7b1bbd LA |
1107 | return (nested_state->format == KVM_STATE_NESTED_FORMAT_VMX && |
1108 | nested_state->hdr.vmx.vmxon_pa != -1ull); | |
ebbfef2f LA |
1109 | } |
1110 | ||
/*
 * VMX flavour of the nested-state subsection: header fields plus optional
 * vmcs12/shadow_vmcs12 payload sub-subsections.
 */
static const VMStateDescription vmstate_vmx_nested_state = {
    .name = "cpu/kvm_nested_state/vmx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_nested_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_U64(hdr.vmx.vmxon_pa, struct kvm_nested_state),
        VMSTATE_U64(hdr.vmx.vmcs12_pa, struct kvm_nested_state),
        VMSTATE_U16(hdr.vmx.smm.flags, struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vmx_vmcs12,
        &vmstate_vmx_shadow_vmcs12,
        NULL,
    }
};
1128 | ||
b16c0e20 PB |
1129 | static bool svm_nested_state_needed(void *opaque) |
1130 | { | |
1131 | struct kvm_nested_state *nested_state = opaque; | |
1132 | ||
1133 | /* | |
1134 | * HF_GUEST_MASK and HF2_GIF_MASK are already serialized | |
1135 | * via hflags and hflags2, all that's left is the opaque | |
1136 | * nested state blob. | |
1137 | */ | |
1138 | return (nested_state->format == KVM_STATE_NESTED_FORMAT_SVM && | |
1139 | nested_state->size > offsetof(struct kvm_nested_state, data)); | |
1140 | } | |
1141 | ||
/* SVM flavour of the nested-state subsection: VMCB address plus raw vmcb12. */
static const VMStateDescription vmstate_svm_nested_state = {
    .name = "cpu/kvm_nested_state/svm",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = svm_nested_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_U64(hdr.svm.vmcb_pa, struct kvm_nested_state),
        VMSTATE_UINT8_ARRAY(data.svm[0].vmcb12,
                            struct kvm_nested_state,
                            KVM_STATE_NESTED_SVM_VMCB_SIZE),
        VMSTATE_END_OF_LIST()
    }
};
1155 | ||
ebbfef2f LA |
1156 | static bool nested_state_needed(void *opaque) |
1157 | { | |
1158 | X86CPU *cpu = opaque; | |
1159 | CPUX86State *env = &cpu->env; | |
1160 | ||
1161 | return (env->nested_state && | |
b16c0e20 PB |
1162 | (vmx_nested_state_needed(env->nested_state) || |
1163 | svm_nested_state_needed(env->nested_state))); | |
ebbfef2f LA |
1164 | } |
1165 | ||
/*
 * Validate the kvm_nested_state blob received from the migration stream
 * before it is handed back to the kernel.  Returns 0 on success or -EINVAL
 * (failing the migration) on any inconsistency.
 */
static int nested_state_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    struct kvm_nested_state *nested_state = env->nested_state;
    int min_nested_state_len = offsetof(struct kvm_nested_state, data);
    int max_nested_state_len = kvm_max_nested_state_length();

    /*
     * If our kernel doesn't support setting nested state
     * and we have received nested state from migration stream,
     * we need to fail migration
     */
    if (max_nested_state_len <= 0) {
        error_report("Received nested state when kernel cannot restore it");
        return -EINVAL;
    }

    /*
     * Verify that the size of received nested_state struct
     * at least covers the required header and is not larger
     * than the max size that our kernel supports
     */
    if (nested_state->size < min_nested_state_len) {
        error_report("Received nested state size less than min: "
                     "len=%d, min=%d",
                     nested_state->size, min_nested_state_len);
        return -EINVAL;
    }
    if (nested_state->size > max_nested_state_len) {
        error_report("Received unsupported nested state size: "
                     "nested_state->size=%d, max=%d",
                     nested_state->size, max_nested_state_len);
        return -EINVAL;
    }

    /* Verify format is valid */
    if ((nested_state->format != KVM_STATE_NESTED_FORMAT_VMX) &&
        (nested_state->format != KVM_STATE_NESTED_FORMAT_SVM)) {
        error_report("Received invalid nested state format: %d",
                     nested_state->format);
        return -EINVAL;
    }

    return 0;
}
1212 | ||
/*
 * Wire layout of the kvm_nested_state header; the format-specific payload
 * travels in the VMX/SVM sub-subsections below.
 */
static const VMStateDescription vmstate_kvm_nested_state = {
    .name = "cpu/kvm_nested_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_U16(flags, struct kvm_nested_state),
        VMSTATE_U16(format, struct kvm_nested_state),
        VMSTATE_U32(size, struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vmx_nested_state,
        &vmstate_svm_nested_state,
        NULL
    }
};
1229 | ||
/*
 * Optional "cpu/nested_state" subsection: follows the env.nested_state
 * pointer and serializes the struct it points to via
 * vmstate_kvm_nested_state; validated on load by nested_state_post_load().
 */
static const VMStateDescription vmstate_nested_state = {
    .name = "cpu/nested_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = nested_state_needed,
    .post_load = nested_state_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_POINTER(env.nested_state, X86CPU,
                               vmstate_kvm_nested_state,
                               struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    }
};
1243 | ||
1244 | #endif | |
1245 | ||
87f8b626 AR |
1246 | static bool mcg_ext_ctl_needed(void *opaque) |
1247 | { | |
1248 | X86CPU *cpu = opaque; | |
1249 | CPUX86State *env = &cpu->env; | |
1250 | return cpu->enable_lmce && env->mcg_ext_ctl; | |
1251 | } | |
1252 | ||
/* Optional "cpu/mcg_ext_ctl" subsection; guarded by mcg_ext_ctl_needed(). */
static const VMStateDescription vmstate_mcg_ext_ctl = {
    .name = "cpu/mcg_ext_ctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mcg_ext_ctl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1263 | ||
a33a2cfe PB |
1264 | static bool spec_ctrl_needed(void *opaque) |
1265 | { | |
1266 | X86CPU *cpu = opaque; | |
1267 | CPUX86State *env = &cpu->env; | |
1268 | ||
1269 | return env->spec_ctrl != 0; | |
1270 | } | |
1271 | ||
/* Optional "cpu/spec_ctrl" subsection; guarded by spec_ctrl_needed(). */
static const VMStateDescription vmstate_spec_ctrl = {
    .name = "cpu/spec_ctrl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spec_ctrl_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.spec_ctrl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1282 | ||
cabf9862 ML |
1283 | |
1284 | static bool amd_tsc_scale_msr_needed(void *opaque) | |
1285 | { | |
1286 | X86CPU *cpu = opaque; | |
1287 | CPUX86State *env = &cpu->env; | |
1288 | ||
1289 | return (env->features[FEAT_SVM] & CPUID_SVM_TSCSCALE); | |
1290 | } | |
1291 | ||
/*
 * Optional "cpu/amd_tsc_scale_msr" subsection; guarded by
 * amd_tsc_scale_msr_needed().
 */
static const VMStateDescription amd_tsc_scale_msr_ctrl = {
    .name = "cpu/amd_tsc_scale_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = amd_tsc_scale_msr_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.amd_tsc_scale_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1302 | ||
1303 | ||
b77146e9 CP |
1304 | static bool intel_pt_enable_needed(void *opaque) |
1305 | { | |
1306 | X86CPU *cpu = opaque; | |
1307 | CPUX86State *env = &cpu->env; | |
1308 | int i; | |
1309 | ||
1310 | if (env->msr_rtit_ctrl || env->msr_rtit_status || | |
1311 | env->msr_rtit_output_base || env->msr_rtit_output_mask || | |
1312 | env->msr_rtit_cr3_match) { | |
1313 | return true; | |
1314 | } | |
1315 | ||
1316 | for (i = 0; i < MAX_RTIT_ADDRS; i++) { | |
1317 | if (env->msr_rtit_addrs[i]) { | |
1318 | return true; | |
1319 | } | |
1320 | } | |
1321 | ||
1322 | return false; | |
1323 | } | |
1324 | ||
/*
 * Optional "cpu/intel_pt" subsection (Processor Trace RTIT MSRs).
 * Field order is part of the migration stream format — do not reorder.
 */
static const VMStateDescription vmstate_msr_intel_pt = {
    .name = "cpu/intel_pt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_pt_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_rtit_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_status, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_base, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_mask, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_cr3_match, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_rtit_addrs, X86CPU, MAX_RTIT_ADDRS),
        VMSTATE_END_OF_LIST()
    }
};
1340 | ||
cfeea0c0 KRW |
1341 | static bool virt_ssbd_needed(void *opaque) |
1342 | { | |
1343 | X86CPU *cpu = opaque; | |
1344 | CPUX86State *env = &cpu->env; | |
1345 | ||
1346 | return env->virt_ssbd != 0; | |
1347 | } | |
1348 | ||
/* Optional "cpu/virt_ssbd" subsection; guarded by virt_ssbd_needed(). */
static const VMStateDescription vmstate_msr_virt_ssbd = {
    .name = "cpu/virt_ssbd",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_ssbd_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.virt_ssbd, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1359 | ||
fe441054 JK |
1360 | static bool svm_npt_needed(void *opaque) |
1361 | { | |
1362 | X86CPU *cpu = opaque; | |
1363 | CPUX86State *env = &cpu->env; | |
1364 | ||
1365 | return !!(env->hflags2 & HF2_NPT_MASK); | |
1366 | } | |
1367 | ||
/*
 * Optional SVM nested-paging subsection.
 * NOTE(review): the section name "cpu/svn_npt" looks like a typo for
 * "svm_npt", but the name is part of the migration wire format — renaming
 * it would break migration to/from existing QEMU versions, so it must stay.
 */
static const VMStateDescription vmstate_svm_npt = {
    .name = "cpu/svn_npt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = svm_npt_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.nested_cr3, X86CPU),
        VMSTATE_UINT32(env.nested_pg_mode, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1379 | ||
e3126a5c LL |
1380 | static bool svm_guest_needed(void *opaque) |
1381 | { | |
1382 | X86CPU *cpu = opaque; | |
1383 | CPUX86State *env = &cpu->env; | |
1384 | ||
1385 | return tcg_enabled() && env->int_ctl; | |
1386 | } | |
1387 | ||
/* Optional "cpu/svm_guest" subsection; guarded by svm_guest_needed(). */
static const VMStateDescription vmstate_svm_guest = {
    .name = "cpu/svm_guest",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = svm_guest_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT32(env.int_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1398 | ||
89a44a10 PD |
1399 | #ifndef TARGET_X86_64 |
1400 | static bool intel_efer32_needed(void *opaque) | |
1401 | { | |
1402 | X86CPU *cpu = opaque; | |
1403 | CPUX86State *env = &cpu->env; | |
1404 | ||
1405 | return env->efer != 0; | |
1406 | } | |
1407 | ||
/* Optional "cpu/efer32" subsection (32-bit targets only; see #ifndef above). */
static const VMStateDescription vmstate_efer32 = {
    .name = "cpu/efer32",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_efer32_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1418 | #endif | |
1419 | ||
2a9758c5 PB |
1420 | static bool msr_tsx_ctrl_needed(void *opaque) |
1421 | { | |
1422 | X86CPU *cpu = opaque; | |
1423 | CPUX86State *env = &cpu->env; | |
1424 | ||
1425 | return env->features[FEAT_ARCH_CAPABILITIES] & ARCH_CAP_TSX_CTRL_MSR; | |
1426 | } | |
1427 | ||
/* Optional "cpu/msr_tsx_ctrl" subsection; guarded by msr_tsx_ctrl_needed(). */
static const VMStateDescription vmstate_msr_tsx_ctrl = {
    .name = "cpu/msr_tsx_ctrl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = msr_tsx_ctrl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.tsx_ctrl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1438 | ||
db888065 SC |
1439 | static bool intel_sgx_msrs_needed(void *opaque) |
1440 | { | |
1441 | X86CPU *cpu = opaque; | |
1442 | CPUX86State *env = &cpu->env; | |
1443 | ||
1444 | return !!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC); | |
1445 | } | |
1446 | ||
/*
 * Optional "cpu/intel_sgx" subsection (SGX LE public key hash MSRs);
 * guarded by intel_sgx_msrs_needed().
 */
static const VMStateDescription vmstate_msr_intel_sgx = {
    .name = "cpu/intel_sgx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_sgx_msrs_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_ia32_sgxlepubkeyhash, X86CPU, 4),
        VMSTATE_END_OF_LIST()
    }
};
1457 | ||
1458 | static bool pdptrs_needed(void *opaque) | |
1459 | { | |
1460 | X86CPU *cpu = opaque; | |
1461 | CPUX86State *env = &cpu->env; | |
1462 | return env->pdptrs_valid; | |
1463 | } | |
1464 | ||
1465 | static int pdptrs_post_load(void *opaque, int version_id) | |
1466 | { | |
1467 | X86CPU *cpu = opaque; | |
1468 | CPUX86State *env = &cpu->env; | |
1469 | env->pdptrs_valid = true; | |
1470 | return 0; | |
1471 | } | |
1472 | ||
1473 | ||
/* Optional "cpu/pdptrs" subsection; presence implies validity (post_load). */
static const VMStateDescription vmstate_pdptrs = {
    .name = "cpu/pdptrs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pdptrs_needed,
    .post_load = pdptrs_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.pdptrs, X86CPU, 4),
        VMSTATE_END_OF_LIST()
    }
};
1485 | ||
cdec2b75 ZG |
1486 | static bool xfd_msrs_needed(void *opaque) |
1487 | { | |
1488 | X86CPU *cpu = opaque; | |
1489 | CPUX86State *env = &cpu->env; | |
1490 | ||
1491 | return !!(env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD); | |
1492 | } | |
1493 | ||
/*
 * Optional "cpu/msr_xfd" subsection (XFD and XFD_ERR MSRs); guarded by
 * xfd_msrs_needed().  Field order is part of the wire format.
 */
static const VMStateDescription vmstate_msr_xfd = {
    .name = "cpu/msr_xfd",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xfd_msrs_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_xfd, X86CPU),
        VMSTATE_UINT64(env.msr_xfd_err, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
1505 | ||
1506 | #ifdef TARGET_X86_64 | |
1507 | static bool amx_xtile_needed(void *opaque) | |
1508 | { | |
1509 | X86CPU *cpu = opaque; | |
1510 | CPUX86State *env = &cpu->env; | |
1511 | ||
1512 | return !!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE); | |
1513 | } | |
1514 | ||
/*
 * Optional "cpu/intel_amx_xtile" subsection (TILECFG + TILEDATA);
 * guarded by amx_xtile_needed().
 */
static const VMStateDescription vmstate_amx_xtile = {
    .name = "cpu/intel_amx_xtile",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = amx_xtile_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(env.xtilecfg, X86CPU, 64),
        VMSTATE_UINT8_ARRAY(env.xtiledata, X86CPU, 8192),
        VMSTATE_END_OF_LIST()
    }
};
1526 | #endif | |
1527 | ||
ac701a4f | 1528 | const VMStateDescription vmstate_x86_cpu = { |
0cb892aa | 1529 | .name = "cpu", |
f56e3a14 | 1530 | .version_id = 12, |
08b277ac | 1531 | .minimum_version_id = 11, |
0cb892aa | 1532 | .pre_save = cpu_pre_save, |
0cb892aa | 1533 | .post_load = cpu_post_load, |
d49805ae | 1534 | .fields = (VMStateField[]) { |
f56e3a14 AF |
1535 | VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS), |
1536 | VMSTATE_UINTTL(env.eip, X86CPU), | |
1537 | VMSTATE_UINTTL(env.eflags, X86CPU), | |
1538 | VMSTATE_UINT32(env.hflags, X86CPU), | |
0cb892aa | 1539 | /* FPU */ |
f56e3a14 AF |
1540 | VMSTATE_UINT16(env.fpuc, X86CPU), |
1541 | VMSTATE_UINT16(env.fpus_vmstate, X86CPU), | |
1542 | VMSTATE_UINT16(env.fptag_vmstate, X86CPU), | |
1543 | VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU), | |
46baa900 DDAG |
1544 | |
1545 | VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg), | |
f56e3a14 AF |
1546 | |
1547 | VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6), | |
1548 | VMSTATE_SEGMENT(env.ldt, X86CPU), | |
1549 | VMSTATE_SEGMENT(env.tr, X86CPU), | |
1550 | VMSTATE_SEGMENT(env.gdt, X86CPU), | |
1551 | VMSTATE_SEGMENT(env.idt, X86CPU), | |
1552 | ||
1553 | VMSTATE_UINT32(env.sysenter_cs, X86CPU), | |
f56e3a14 AF |
1554 | VMSTATE_UINTTL(env.sysenter_esp, X86CPU), |
1555 | VMSTATE_UINTTL(env.sysenter_eip, X86CPU), | |
8dd3dca3 | 1556 | |
f56e3a14 AF |
1557 | VMSTATE_UINTTL(env.cr[0], X86CPU), |
1558 | VMSTATE_UINTTL(env.cr[2], X86CPU), | |
1559 | VMSTATE_UINTTL(env.cr[3], X86CPU), | |
1560 | VMSTATE_UINTTL(env.cr[4], X86CPU), | |
1561 | VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8), | |
0cb892aa | 1562 | /* MMU */ |
f56e3a14 | 1563 | VMSTATE_INT32(env.a20_mask, X86CPU), |
0cb892aa | 1564 | /* XMM */ |
f56e3a14 | 1565 | VMSTATE_UINT32(env.mxcsr, X86CPU), |
a03c3e90 | 1566 | VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0), |
8dd3dca3 AJ |
1567 | |
1568 | #ifdef TARGET_X86_64 | |
f56e3a14 AF |
1569 | VMSTATE_UINT64(env.efer, X86CPU), |
1570 | VMSTATE_UINT64(env.star, X86CPU), | |
1571 | VMSTATE_UINT64(env.lstar, X86CPU), | |
1572 | VMSTATE_UINT64(env.cstar, X86CPU), | |
1573 | VMSTATE_UINT64(env.fmask, X86CPU), | |
1574 | VMSTATE_UINT64(env.kernelgsbase, X86CPU), | |
8dd3dca3 | 1575 | #endif |
08b277ac DDAG |
1576 | VMSTATE_UINT32(env.smbase, X86CPU), |
1577 | ||
1578 | VMSTATE_UINT64(env.pat, X86CPU), | |
1579 | VMSTATE_UINT32(env.hflags2, X86CPU), | |
1580 | ||
1581 | VMSTATE_UINT64(env.vm_hsave, X86CPU), | |
1582 | VMSTATE_UINT64(env.vm_vmcb, X86CPU), | |
1583 | VMSTATE_UINT64(env.tsc_offset, X86CPU), | |
1584 | VMSTATE_UINT64(env.intercept, X86CPU), | |
1585 | VMSTATE_UINT16(env.intercept_cr_read, X86CPU), | |
1586 | VMSTATE_UINT16(env.intercept_cr_write, X86CPU), | |
1587 | VMSTATE_UINT16(env.intercept_dr_read, X86CPU), | |
1588 | VMSTATE_UINT16(env.intercept_dr_write, X86CPU), | |
1589 | VMSTATE_UINT32(env.intercept_exceptions, X86CPU), | |
1590 | VMSTATE_UINT8(env.v_tpr, X86CPU), | |
dd5e3b17 | 1591 | /* MTRRs */ |
08b277ac DDAG |
1592 | VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11), |
1593 | VMSTATE_UINT64(env.mtrr_deftype, X86CPU), | |
d8b5c67b | 1594 | VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8), |
0cb892aa | 1595 | /* KVM-related states */ |
08b277ac DDAG |
1596 | VMSTATE_INT32(env.interrupt_injected, X86CPU), |
1597 | VMSTATE_UINT32(env.mp_state, X86CPU), | |
1598 | VMSTATE_UINT64(env.tsc, X86CPU), | |
fd13f23b | 1599 | VMSTATE_INT32(env.exception_nr, X86CPU), |
08b277ac DDAG |
1600 | VMSTATE_UINT8(env.soft_interrupt, X86CPU), |
1601 | VMSTATE_UINT8(env.nmi_injected, X86CPU), | |
1602 | VMSTATE_UINT8(env.nmi_pending, X86CPU), | |
1603 | VMSTATE_UINT8(env.has_error_code, X86CPU), | |
1604 | VMSTATE_UINT32(env.sipi_vector, X86CPU), | |
0cb892aa | 1605 | /* MCE */ |
08b277ac DDAG |
1606 | VMSTATE_UINT64(env.mcg_cap, X86CPU), |
1607 | VMSTATE_UINT64(env.mcg_status, X86CPU), | |
1608 | VMSTATE_UINT64(env.mcg_ctl, X86CPU), | |
1609 | VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4), | |
0cb892aa | 1610 | /* rdtscp */ |
08b277ac | 1611 | VMSTATE_UINT64(env.tsc_aux, X86CPU), |
1a03675d | 1612 | /* KVM pvclock msr */ |
08b277ac DDAG |
1613 | VMSTATE_UINT64(env.system_time_msr, X86CPU), |
1614 | VMSTATE_UINT64(env.wall_clock_msr, X86CPU), | |
f1665b21 | 1615 | /* XSAVE related fields */ |
f56e3a14 AF |
1616 | VMSTATE_UINT64_V(env.xcr0, X86CPU, 12), |
1617 | VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12), | |
b7711471 | 1618 | VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12), |
0cb892aa | 1619 | VMSTATE_END_OF_LIST() |
a0fb002c | 1620 | /* The above list is not sorted /wrt version numbers, watch out! */ |
f6584ee2 | 1621 | }, |
5cd8cada | 1622 | .subsections = (const VMStateDescription*[]) { |
fd13f23b | 1623 | &vmstate_exception_info, |
5cd8cada | 1624 | &vmstate_async_pf_msr, |
db5daafa | 1625 | &vmstate_async_pf_int_msr, |
5cd8cada JQ |
1626 | &vmstate_pv_eoi_msr, |
1627 | &vmstate_steal_time_msr, | |
d645e132 | 1628 | &vmstate_poll_control_msr, |
5cd8cada JQ |
1629 | &vmstate_fpop_ip_dp, |
1630 | &vmstate_msr_tsc_adjust, | |
1631 | &vmstate_msr_tscdeadline, | |
1632 | &vmstate_msr_ia32_misc_enable, | |
1633 | &vmstate_msr_ia32_feature_control, | |
1634 | &vmstate_msr_architectural_pmu, | |
1635 | &vmstate_mpx, | |
816d20c9 | 1636 | &vmstate_msr_hyperv_hypercall, |
5cd8cada JQ |
1637 | &vmstate_msr_hyperv_vapic, |
1638 | &vmstate_msr_hyperv_time, | |
f2a53c9e | 1639 | &vmstate_msr_hyperv_crash, |
46eb8f98 | 1640 | &vmstate_msr_hyperv_runtime, |
866eea9a | 1641 | &vmstate_msr_hyperv_synic, |
ff99aa64 | 1642 | &vmstate_msr_hyperv_stimer, |
ba6a4fd9 | 1643 | &vmstate_msr_hyperv_reenlightenment, |
5cd8cada JQ |
1644 | &vmstate_avx512, |
1645 | &vmstate_xss, | |
65087997 | 1646 | &vmstate_umwait, |
36f96c4b | 1647 | &vmstate_tsc_khz, |
e13713db | 1648 | &vmstate_msr_smi_count, |
f74eefe0 | 1649 | &vmstate_pkru, |
e7e7bdab | 1650 | &vmstate_pkrs, |
a33a2cfe | 1651 | &vmstate_spec_ctrl, |
cabf9862 | 1652 | &amd_tsc_scale_msr_ctrl, |
87f8b626 | 1653 | &vmstate_mcg_ext_ctl, |
b77146e9 | 1654 | &vmstate_msr_intel_pt, |
cfeea0c0 | 1655 | &vmstate_msr_virt_ssbd, |
fe441054 | 1656 | &vmstate_svm_npt, |
e3126a5c | 1657 | &vmstate_svm_guest, |
89a44a10 PD |
1658 | #ifndef TARGET_X86_64 |
1659 | &vmstate_efer32, | |
ebbfef2f LA |
1660 | #endif |
1661 | #ifdef CONFIG_KVM | |
1662 | &vmstate_nested_state, | |
89a44a10 | 1663 | #endif |
2a9758c5 | 1664 | &vmstate_msr_tsx_ctrl, |
db888065 | 1665 | &vmstate_msr_intel_sgx, |
8f515d38 | 1666 | &vmstate_pdptrs, |
cdec2b75 ZG |
1667 | &vmstate_msr_xfd, |
1668 | #ifdef TARGET_X86_64 | |
1669 | &vmstate_amx_xtile, | |
1670 | #endif | |
5cd8cada | 1671 | NULL |
79c4f6b0 | 1672 | } |
0cb892aa | 1673 | }; |