.name = "segment",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT32(selector, SegmentCache),
VMSTATE_UINTTL(base, SegmentCache),
VMSTATE_UINT32(limit, SegmentCache),
.name = "xmm_reg",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(XMM_Q(0), XMMReg),
VMSTATE_UINT64(XMM_Q(1), XMMReg),
VMSTATE_END_OF_LIST()
}
};
-#define VMSTATE_XMM_REGS(_field, _state, _n) \
- VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_xmm_reg, XMMReg)
+#define VMSTATE_XMM_REGS(_field, _state, _start) \
+ VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \
+ vmstate_xmm_reg, XMMReg)
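The switch from VMSTATE_STRUCT_ARRAY to VMSTATE_STRUCT_SUB_ARRAY is what lets several sections share the single widened xmm_regs array: each section now names a start index and an element count instead of always covering the whole array. A minimal standalone sketch of the offset arithmetic this implies, using hypothetical stand-in types (Reg512, DemoState) rather than the real QEMU macros:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins: one 512-bit register as eight 64-bit qwords,
     * and a CPU state holding 32 of them (16 low + 16 high on x86-64). */
    typedef struct { uint64_t q[8]; } Reg512;
    typedef struct { Reg512 xmm_regs[32]; } DemoState;

    int main(void)
    {
        /* A sub-array with start = 16 and num = 16 covers &xmm_regs[16] up
         * to &xmm_regs[31] only, mirroring what QEMU's
         * vmstate_offset_sub_array computes internally. */
        size_t start = 16;
        size_t off = offsetof(DemoState, xmm_regs) + start * sizeof(Reg512);
        printf("sub-array begins %zu bytes into DemoState\n", off);
        return 0;
    }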
-/* YMMH format is the same as XMM */
+/* YMMH format is the same as XMM, but for bits 128-255 */
static const VMStateDescription vmstate_ymmh_reg = {
.name = "ymmh_reg",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(XMM_Q(2), XMMReg),
+ VMSTATE_UINT64(XMM_Q(3), XMMReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v) \
+ VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v, \
+ vmstate_ymmh_reg, XMMReg)
+
+static const VMStateDescription vmstate_zmmh_reg = {
+ .name = "zmmh_reg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(XMM_Q(4), XMMReg),
+ VMSTATE_UINT64(XMM_Q(5), XMMReg),
+ VMSTATE_UINT64(XMM_Q(6), XMMReg),
+ VMSTATE_UINT64(XMM_Q(7), XMMReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start) \
+ VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \
+ vmstate_zmmh_reg, XMMReg)
+
+#ifdef TARGET_X86_64
+static const VMStateDescription vmstate_hi16_zmm_reg = {
+ .name = "hi16_zmm_reg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(XMM_Q(0), XMMReg),
VMSTATE_UINT64(XMM_Q(1), XMMReg),
+ VMSTATE_UINT64(XMM_Q(2), XMMReg),
+ VMSTATE_UINT64(XMM_Q(3), XMMReg),
+ VMSTATE_UINT64(XMM_Q(4), XMMReg),
+ VMSTATE_UINT64(XMM_Q(5), XMMReg),
+ VMSTATE_UINT64(XMM_Q(6), XMMReg),
+ VMSTATE_UINT64(XMM_Q(7), XMMReg),
VMSTATE_END_OF_LIST()
}
};
-#define VMSTATE_YMMH_REGS_VARS(_field, _state, _n, _v) \
- VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_ymmh_reg, XMMReg)
+#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start) \
+ VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \
+ vmstate_hi16_zmm_reg, XMMReg)
+#endif
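Taken together, the per-register descriptions above slice each 512-bit register by qword index. Assuming CPU_NB_REGS is 16 under TARGET_X86_64, as in upstream QEMU, the coverage works out as follows:

    /* Illustrative coverage map (assumption: CPU_NB_REGS == 16 on x86-64):
     *
     *                  regs 0..15          regs 16..31
     *   XMM_Q(0..1)    vmstate_xmm_reg     vmstate_hi16_zmm_reg
     *   XMM_Q(2..3)    vmstate_ymmh_reg    vmstate_hi16_zmm_reg
     *   XMM_Q(4..7)    vmstate_zmmh_reg    vmstate_hi16_zmm_reg
     *
     * XMM_Q(n) holds bits 64*n .. 64*n+63 of the architectural register,
     * so the XMM and YMMH sections keep the same wire format as before
     * the xmm_regs array was widened.
     */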
static const VMStateDescription vmstate_bnd_regs = {
.name = "bnd_regs",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT64(lb, BNDReg),
VMSTATE_UINT64(ub, BNDReg),
VMSTATE_END_OF_LIST()
.name = "mtrr_var",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(base, MTRRVar),
VMSTATE_UINT64(mask, MTRRVar),
VMSTATE_END_OF_LIST()
env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
}
- /* XXX: restore FPU round state */
+ /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
+ * running under KVM. This is wrong for conforming code segments.
+ * Luckily, in our implementation the CPL field of hflags is redundant
+ * and we can get the right value from the SS descriptor privilege level.
+ */
+ env->hflags &= ~HF_CPL_MASK;
+ env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+
env->fpstt = (env->fpus_vmstate >> 11) & 7;
env->fpus = env->fpus_vmstate & ~0x3800;
env->fptag_vmstate ^= 0xff;
for(i = 0; i < 8; i++) {
env->fptags[i] = (env->fptag_vmstate >> i) & 1;
}
+ update_fp_status(env);
cpu_breakpoint_remove_all(cs, BP_CPU);
cpu_watchpoint_remove_all(cs, BP_CPU);
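Since the CPL is now rederived from SS.DPL on load, a worked example of the bit arithmetic may help. The constants below match my reading of target-i386/cpu.h (DESC_DPL_SHIFT is 13, HF_CPL_MASK is 3); the snippet is a standalone illustration of the two added lines, not patch code:

    #include <assert.h>
    #include <stdint.h>

    #define DESC_DPL_SHIFT 13  /* DPL sits in bits 13-14 of cached flags */
    #define HF_CPL_MASK    3   /* CPL occupies the low two bits of hflags */

    int main(void)
    {
        uint32_t ss_flags = 3u << DESC_DPL_SHIFT;  /* SS.DPL == 3: user mode */
        uint32_t hflags = 0;

        hflags &= ~(uint32_t)HF_CPL_MASK;                      /* clear CPL */
        hflags |= (ss_flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;  /* CPL = DPL */
        assert((hflags & HF_CPL_MASK) == 3);
        return 0;
    }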
.name = "cpu/steal_time_msr",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.steal_time_msr, X86CPU),
VMSTATE_END_OF_LIST()
}
.name = "cpu/async_pf_msr",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
VMSTATE_END_OF_LIST()
}
.name = "cpu/async_pv_eoi_msr",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
VMSTATE_END_OF_LIST()
}
.name = "cpu/fpop_ip_dp",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT16(env.fpop, X86CPU),
VMSTATE_UINT64(env.fpip, X86CPU),
VMSTATE_UINT64(env.fpdp, X86CPU),
.name = "cpu/msr_tsc_adjust",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT64(env.tsc_adjust, X86CPU),
VMSTATE_END_OF_LIST()
}
.name = "cpu/msr_tscdeadline",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.tsc_deadline, X86CPU),
VMSTATE_END_OF_LIST()
}
.name = "cpu/msr_ia32_misc_enable",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
VMSTATE_END_OF_LIST()
}
.name = "cpu/msr_ia32_feature_control",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
VMSTATE_END_OF_LIST()
}
.name = "cpu/msr_architectural_pmu",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
VMSTATE_UINT64(env.msr_global_status, X86CPU),
.name = "cpu/mpx",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
.name = "cpu/msr_hyperv_hypercall",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
- VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
+ VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
VMSTATE_END_OF_LIST()
}
};
.name = "cpu/msr_hyperv_vapic",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
VMSTATE_END_OF_LIST()
}
.name = "cpu/msr_hyperv_time",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
VMSTATE_END_OF_LIST()
}
};
-const VMStateDescription vmstate_x86_cpu = {
+static bool avx512_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ unsigned int i;
+
+ for (i = 0; i < NB_OPMASK_REGS; i++) {
+ if (env->opmask_regs[i]) {
+ return true;
+ }
+ }
+
+ for (i = 0; i < CPU_NB_REGS; i++) {
+#define ENV_XMM(reg, field) (env->xmm_regs[reg].XMM_Q(field))
+ if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
+ ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
+ return true;
+ }
+#ifdef TARGET_X86_64
+ if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
+ ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
+ ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
+ ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
+ return true;
+ }
+#endif
+ }
+
+ return false;
+}
+
+static const VMStateDescription vmstate_avx512 = {
+ .name = "cpu/avx512",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
+ VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
+#ifdef TARGET_X86_64
+ VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
+#endif
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool xss_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->xss != 0;
+}
+
+static const VMStateDescription vmstate_xss = {
+ .name = "cpu/xss",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.xss, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
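Both new blocks hook into the migration core as subsections: a subsection is emitted only when its needed callback returns true, so a guest whose AVX-512 and XSS state is all-zero still produces a stream that older QEMU can load. A simplified sketch of that gating, with a trimmed-down stand-in for the subsection entry type (not the real migration/vmstate.c code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical, trimmed-down shape of one subsection entry. */
    typedef struct {
        const char *name;
        bool (*needed)(void *opaque);
    } DemoSubsection;

    static bool demo_avx512_needed(void *opaque)
    {
        return *(bool *)opaque;  /* stands in for scanning opmask/ZMM state */
    }

    int main(void)
    {
        bool dirty = false;
        DemoSubsection sub = { "cpu/avx512", demo_avx512_needed };

        /* The core skips the subsection entirely when .needed reports false. */
        printf("send subsection? %d\n", sub.needed && sub.needed(&dirty));
        return 0;
    }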
+
+VMStateDescription vmstate_x86_cpu = {
.name = "cpu",
.version_id = 12,
.minimum_version_id = 3,
- .minimum_version_id_old = 3,
.pre_save = cpu_pre_save,
.post_load = cpu_post_load,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
VMSTATE_UINTTL(env.eip, X86CPU),
VMSTATE_UINTTL(env.eflags, X86CPU),
VMSTATE_INT32(env.a20_mask, X86CPU),
/* XMM */
VMSTATE_UINT32(env.mxcsr, X86CPU),
- VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, CPU_NB_REGS),
+ VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
VMSTATE_UINT64(env.efer, X86CPU),
/* MTRRs */
VMSTATE_UINT64_ARRAY_V(env.mtrr_fixed, X86CPU, 11, 8),
VMSTATE_UINT64_V(env.mtrr_deftype, X86CPU, 8),
- VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, 8, 8),
+ VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
/* KVM-related states */
VMSTATE_INT32_V(env.interrupt_injected, X86CPU, 9),
VMSTATE_UINT32_V(env.mp_state, X86CPU, 9),
/* XSAVE related fields */
VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
- VMSTATE_YMMH_REGS_VARS(env.ymmh_regs, X86CPU, CPU_NB_REGS, 12),
+ VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
VMSTATE_END_OF_LIST()
/* The above list is not sorted /wrt version numbers, watch out! */
},
}, {
.vmsd = &vmstate_msr_hyperv_time,
.needed = hyperv_time_enable_needed,
+ }, {
+ .vmsd = &vmstate_avx512,
+ .needed = avx512_needed,
+ }, {
+ .vmsd = &vmstate_xss,
+ .needed = xss_needed,
} , {
/* empty */
}