.name = "segment",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT32(selector, SegmentCache),
VMSTATE_UINTTL(base, SegmentCache),
VMSTATE_UINT32(limit, SegmentCache),
.name = "xmm_reg",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(XMM_Q(0), XMMReg),
VMSTATE_UINT64(XMM_Q(1), XMMReg),
VMSTATE_END_OF_LIST()
.name = "ymmh_reg",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(XMM_Q(0), XMMReg),
VMSTATE_UINT64(XMM_Q(1), XMMReg),
VMSTATE_END_OF_LIST()
#define VMSTATE_YMMH_REGS_VARS(_field, _state, _n, _v) \
VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_ymmh_reg, XMMReg)
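+/* Upper halves (bits 256..511) of ZMM0-ZMM15: the AVX-512 state above the
+ * existing YMM registers, saved as four 64-bit quadwords per register.
+ */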
+static const VMStateDescription vmstate_zmmh_reg = {
+ .name = "zmmh_reg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(YMM_Q(0), YMMReg),
+ VMSTATE_UINT64(YMM_Q(1), YMMReg),
+ VMSTATE_UINT64(YMM_Q(2), YMMReg),
+ VMSTATE_UINT64(YMM_Q(3), YMMReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _n) \
+ VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_zmmh_reg, YMMReg)
+
+#ifdef TARGET_X86_64
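+/* ZMM16-ZMM31 are only architecturally visible in 64-bit mode; each one
+ * is migrated in full as eight 64-bit quadwords.
+ */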
+static const VMStateDescription vmstate_hi16_zmm_reg = {
+ .name = "hi16_zmm_reg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _n) \
+ VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_hi16_zmm_reg, ZMMReg)
+#endif
+
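+/* MPX bound registers BND0-BND3, each a 64-bit lower and upper bound. */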
+static const VMStateDescription vmstate_bnd_regs = {
+ .name = "bnd_regs",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(lb, BNDReg),
+ VMSTATE_UINT64(ub, BNDReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_BND_REGS(_field, _state, _n) \
+ VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)
+
static const VMStateDescription vmstate_mtrr_var = {
.name = "mtrr_var",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(base, MTRRVar),
VMSTATE_UINT64(mask, MTRRVar),
VMSTATE_END_OF_LIST()
static int cpu_post_load(void *opaque, int version_id)
{
X86CPU *cpu = opaque;
+ CPUState *cs = CPU(cpu);
CPUX86State *env = &cpu->env;
int i;
env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
}
- /* XXX: restore FPU round state */
+ /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
+ * running under KVM. This is wrong for conforming code segments.
+ * Luckily, in our implementation the CPL field of hflags is redundant
+ * and we can get the right value from the SS descriptor privilege level.
+ */
+ env->hflags &= ~HF_CPL_MASK;
+ env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+
env->fpstt = (env->fpus_vmstate >> 11) & 7;
env->fpus = env->fpus_vmstate & ~0x3800;
env->fptag_vmstate ^= 0xff;
for(i = 0; i < 8; i++) {
env->fptags[i] = (env->fptag_vmstate >> i) & 1;
}
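+ /* Recompute the softfloat rounding mode and precision from the
+ * reloaded FPU control word.
+ */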
+ update_fp_status(env);
- cpu_breakpoint_remove_all(env, BP_CPU);
- cpu_watchpoint_remove_all(env, BP_CPU);
+ cpu_breakpoint_remove_all(cs, BP_CPU);
+ cpu_watchpoint_remove_all(cs, BP_CPU);
for (i = 0; i < DR7_MAX_BP; i++) {
hw_breakpoint_insert(env, i);
}
- tlb_flush(env, 1);
+ tlb_flush(cs, 1);
return 0;
}
.name = "cpu/steal_time_msr",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.steal_time_msr, X86CPU),
VMSTATE_END_OF_LIST()
}
.name = "cpu/async_pf_msr",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
VMSTATE_END_OF_LIST()
}
.name = "cpu/async_pv_eoi_msr",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
VMSTATE_END_OF_LIST()
}
.name = "cpu/fpop_ip_dp",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT16(env.fpop, X86CPU),
VMSTATE_UINT64(env.fpip, X86CPU),
VMSTATE_UINT64(env.fpdp, X86CPU),
.name = "cpu/msr_tsc_adjust",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField[]) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.tsc_adjust, X86CPU),
VMSTATE_END_OF_LIST()
}
.name = "cpu/msr_tscdeadline",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.tsc_deadline, X86CPU),
VMSTATE_END_OF_LIST()
}
.name = "cpu/msr_ia32_misc_enable",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
VMSTATE_END_OF_LIST()
}
.name = "cpu/msr_ia32_feature_control",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
VMSTATE_END_OF_LIST()
}
.name = "cpu/msr_architectural_pmu",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
VMSTATE_UINT64(env.msr_global_status, X86CPU),
}
};
-const VMStateDescription vmstate_x86_cpu = {
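+/* A subsection's "needed" callback runs at save time; the subsection is
+ * written to the stream only when it returns true, so migration to older
+ * QEMU versions keeps working as long as the corresponding state is unused.
+ */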
+static bool mpx_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++) {
+ if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
+ return true;
+ }
+ }
+
+ if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
+ return true;
+ }
+
+ return !!env->msr_bndcfgs;
+}
+
+static const VMStateDescription vmstate_mpx = {
+ .name = "cpu/mpx",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
+ VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
+ VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
+ VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
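+/* Hyper-V guest OS ID and hypercall page MSRs; a nonzero value in either
+ * means the guest has set up the hypercall interface.
+ */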
+static bool hyperv_hypercall_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_hypercall = {
+ .name = "cpu/msr_hyperv_hypercall",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
+ VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
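+/* Hyper-V APIC assist (vAPIC) page MSR. */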
+static bool hyperv_vapic_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->msr_hv_vapic != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_vapic = {
+ .name = "cpu/msr_hyperv_vapic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
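+/* Hyper-V reference TSC page MSR. */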
+static bool hyperv_time_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->msr_hv_tsc != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_time = {
+ .name = "cpu/msr_hyperv_time",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
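+/* Send the AVX-512 subsection only when some opmask register or upper ZMM
+ * state is live, keeping streams compatible with pre-AVX-512 QEMU.
+ */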
+static bool avx512_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ unsigned int i;
+
+ for (i = 0; i < NB_OPMASK_REGS; i++) {
+ if (env->opmask_regs[i]) {
+ return true;
+ }
+ }
+
+ for (i = 0; i < CPU_NB_REGS; i++) {
+#define ENV_ZMMH(reg, field) (env->zmmh_regs[reg].YMM_Q(field))
+ if (ENV_ZMMH(i, 0) || ENV_ZMMH(i, 1) ||
+ ENV_ZMMH(i, 2) || ENV_ZMMH(i, 3)) {
+ return true;
+ }
+#ifdef TARGET_X86_64
+#define ENV_Hi16_ZMM(reg, field) (env->hi16_zmm_regs[reg].ZMM_Q(field))
+ if (ENV_Hi16_ZMM(i, 0) || ENV_Hi16_ZMM(i, 1) ||
+ ENV_Hi16_ZMM(i, 2) || ENV_Hi16_ZMM(i, 3) ||
+ ENV_Hi16_ZMM(i, 4) || ENV_Hi16_ZMM(i, 5) ||
+ ENV_Hi16_ZMM(i, 6) || ENV_Hi16_ZMM(i, 7)) {
+ return true;
+ }
+#endif
+ }
+
+ return false;
+}
+
+static const VMStateDescription vmstate_avx512 = {
+ .name = "cpu/avx512",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
+ VMSTATE_ZMMH_REGS_VARS(env.zmmh_regs, X86CPU, CPU_NB_REGS),
+#ifdef TARGET_X86_64
+ VMSTATE_Hi16_ZMM_REGS_VARS(env.hi16_zmm_regs, X86CPU, CPU_NB_REGS),
+#endif
+ VMSTATE_END_OF_LIST()
+ }
+};
+
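+/* The IA32_XSS MSR selects which supervisor-mode state components
+ * XSAVES/XRSTORS manage; migrate it only when nonzero.
+ */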
+static bool xss_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->xss != 0;
+}
+
+static const VMStateDescription vmstate_xss = {
+ .name = "cpu/xss",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.xss, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+VMStateDescription vmstate_x86_cpu = {
.name = "cpu",
.version_id = 12,
.minimum_version_id = 3,
- .minimum_version_id_old = 3,
.pre_save = cpu_pre_save,
.post_load = cpu_post_load,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
VMSTATE_UINTTL(env.eip, X86CPU),
VMSTATE_UINTTL(env.eflags, X86CPU),
/* MTRRs */
VMSTATE_UINT64_ARRAY_V(env.mtrr_fixed, X86CPU, 11, 8),
VMSTATE_UINT64_V(env.mtrr_deftype, X86CPU, 8),
- VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, 8, 8),
+ VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
/* KVM-related states */
VMSTATE_INT32_V(env.interrupt_injected, X86CPU, 9),
VMSTATE_UINT32_V(env.mp_state, X86CPU, 9),
}, {
.vmsd = &vmstate_msr_architectural_pmu,
.needed = pmu_enable_needed,
+ }, {
+ .vmsd = &vmstate_mpx,
+ .needed = mpx_needed,
+ }, {
+ .vmsd = &vmstate_msr_hyperv_hypercall,
+ .needed = hyperv_hypercall_enable_needed,
+ }, {
+ .vmsd = &vmstate_msr_hyperv_vapic,
+ .needed = hyperv_vapic_enable_needed,
+ }, {
+ .vmsd = &vmstate_msr_hyperv_time,
+ .needed = hyperv_time_enable_needed,
+ }, {
+ .vmsd = &vmstate_avx512,
+ .needed = avx512_needed,
+ }, {
+ .vmsd = &vmstate_xss,
+ .needed = xss_needed,
} , {
/* empty */
}