#include "sysemu/kvm_int.h"
#include "kvm_i386.h"
#include "hyperv.h"
+#include "hyperv-proto.h"
#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "hw/i386/x86-iommu.h"
#include "exec/ioport.h"
-#include "standard-headers/asm-x86/hyperv.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
-#include "migration/migration.h"
+#include "hw/pci/msix.h"
+#include "migration/blocker.h"
#include "exec/memattrs.h"
#include "trace.h"
/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096
-#ifndef BUS_MCEERR_AR
-#define BUS_MCEERR_AR 4
-#endif
-#ifndef BUS_MCEERR_AO
-#define BUS_MCEERR_AO 5
-#endif
-
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_INFO(SET_TSS_ADDR),
KVM_CAP_INFO(EXT_CPUID),
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
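+/* Set if KVM advertises HV_X64_MSR_TSC_FREQUENCY in its MSR list */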
+static bool has_msr_hv_frequencies;
static bool has_msr_xss;
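+/* Set if KVM advertises MSR_IA32_SPEC_CTRL (IBRS, Spectre v2 mitigation) */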
+static bool has_msr_spec_ctrl;
-static bool has_msr_architectural_pmu;
-static uint32_t num_architectural_pmu_counters;
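+/* Architectural PMU properties, read from CPUID leaf 0xA at vcpu init */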
+static uint32_t has_architectural_pmu_version;
+static uint32_t num_architectural_pmu_gp_counters;
+static uint32_t num_architectural_pmu_fixed_counters;
static int has_xsave;
static int has_xcrs;
return features;
}
+static bool host_tsx_blacklisted(void)
+{
+    int family, model, stepping;
+ char vendor[CPUID_VENDOR_SZ + 1];
+
+ host_vendor_fms(vendor, &family, &model, &stepping);
+
+ /* Check if we are running on a Haswell host known to have broken TSX */
+ return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
+ (family == 6) &&
+ ((model == 63 && stepping < 4) ||
+ model == 60 || model == 69 || model == 70);
+}
/* Returns the value for a specific register on the cpuid entry
*/
}
} else if (function == 6 && reg == R_EAX) {
ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
+ } else if (function == 7 && index == 0 && reg == R_EBX) {
+ if (host_tsx_blacklisted()) {
+ ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
+ }
} else if (function == 0x80000001 && reg == R_EDX) {
/* On Intel, kvm returns cpuid according to the Intel spec,
* so add missing bits according to the AMD spec:
if (!kvm_irqchip_in_kernel()) {
ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
}
+ } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
+        ret |= 1U << KVM_HINTS_DEDICATED;
+ found = 1;
}
/* fallback for older kernels */
exit(1);
}
-int kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
+void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
X86CPU *cpu = X86_CPU(c);
CPUX86State *env = &cpu->env;
ram_addr_t ram_addr;
hwaddr paddr;
- if ((env->mcg_cap & MCG_SER_P) && addr
- && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
+ /* If we get an action required MCE, it has been injected by KVM
+ * while the VM was running. An action optional MCE instead should
+ * be coming from the main thread, which qemu_init_sigbus identifies
+ * as the "early kill" thread.
+ */
+ assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);
+
+ if ((env->mcg_cap & MCG_SER_P) && addr) {
ram_addr = qemu_ram_addr_from_host(addr);
- if (ram_addr == RAM_ADDR_INVALID ||
- !kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
- fprintf(stderr, "Hardware memory error for memory used by "
- "QEMU itself instead of guest system!\n");
- /* Hope we are lucky for AO MCE */
- if (code == BUS_MCEERR_AO) {
- return 0;
- } else {
- hardware_memory_error();
- }
- }
- kvm_hwpoison_page_add(ram_addr);
- kvm_mce_inject(cpu, paddr, code);
- } else {
- if (code == BUS_MCEERR_AO) {
- return 0;
- } else if (code == BUS_MCEERR_AR) {
- hardware_memory_error();
- } else {
- return 1;
+ if (ram_addr != RAM_ADDR_INVALID &&
+ kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
+ kvm_hwpoison_page_add(ram_addr);
+ kvm_mce_inject(cpu, paddr, code);
+ return;
}
- }
- return 0;
-}
-
-int kvm_arch_on_sigbus(int code, void *addr)
-{
- X86CPU *cpu = X86_CPU(first_cpu);
- if ((cpu->env.mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
- ram_addr_t ram_addr;
- hwaddr paddr;
+ fprintf(stderr, "Hardware memory error for memory used by "
+ "QEMU itself instead of guest system!\n");
+ }
- /* Hope we are lucky for AO MCE */
- ram_addr = qemu_ram_addr_from_host(addr);
- if (ram_addr == RAM_ADDR_INVALID ||
- !kvm_physical_memory_addr_from_host(first_cpu->kvm_state,
- addr, &paddr)) {
- fprintf(stderr, "Hardware memory error for memory used by "
- "QEMU itself instead of guest system!: %p\n", addr);
- return 0;
- }
- kvm_hwpoison_page_add(ram_addr);
- kvm_mce_inject(X86_CPU(first_cpu), paddr, code);
- } else {
- if (code == BUS_MCEERR_AO) {
- return 0;
- } else if (code == BUS_MCEERR_AR) {
- hardware_memory_error();
- } else {
- return 1;
- }
+ if (code == BUS_MCEERR_AR) {
+ hardware_memory_error();
}
- return 0;
+
+ /* Hope we are lucky for AO MCE */
}
static int kvm_inject_mce_oldstyle(X86CPU *cpu)
kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
-ENOTSUP;
if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
- error_report("warning: TSC frequency mismatch between "
- "VM (%" PRId64 " kHz) and host (%d kHz), "
- "and TSC scaling unavailable",
- env->tsc_khz, cur_freq);
+ warn_report("TSC frequency mismatch between "
+ "VM (%" PRId64 " kHz) and host (%d kHz), "
+ "and TSC scaling unavailable",
+ env->tsc_khz, cur_freq);
return r;
}
}
return 0;
}
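+/* The guest TSC rate is "known" when env->tsc_khz is set, and "stable" when
+ * the host has an invariant TSC (CPUID_APM_INVTSC) or the user pinned the
+ * frequency on the command line. */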
+static bool tsc_is_stable_and_known(CPUX86State *env)
+{
+ if (!env->tsc_khz) {
+ return false;
+ }
+ return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
+ || env->user_tsc_khz;
+}
+
static int hyperv_handle_properties(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
}
if (cpu->hyperv_relaxed_timing) {
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
}
if (cpu->hyperv_vapic) {
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_APIC_ACCESS_AVAILABLE;
}
if (cpu->hyperv_time) {
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
- env->features[FEAT_HYPERV_EAX] |= 0x200;
+ env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_TIME_REF_COUNT_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_REFERENCE_TSC_AVAILABLE;
+
+ if (has_msr_hv_frequencies && tsc_is_stable_and_known(env)) {
+ env->features[FEAT_HYPERV_EAX] |= HV_ACCESS_FREQUENCY_MSRS;
+ env->features[FEAT_HYPERV_EDX] |= HV_FREQUENCY_MSRS_AVAILABLE;
+ }
}
if (cpu->hyperv_crash && has_msr_hv_crash) {
- env->features[FEAT_HYPERV_EDX] |= HV_X64_GUEST_CRASH_MSR_AVAILABLE;
+ env->features[FEAT_HYPERV_EDX] |= HV_GUEST_CRASH_MSR_AVAILABLE;
}
- env->features[FEAT_HYPERV_EDX] |= HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
+ env->features[FEAT_HYPERV_EDX] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
if (cpu->hyperv_reset && has_msr_hv_reset) {
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_RESET_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_RESET_AVAILABLE;
}
if (cpu->hyperv_vpindex && has_msr_hv_vpindex) {
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_INDEX_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_VP_INDEX_AVAILABLE;
}
if (cpu->hyperv_runtime && has_msr_hv_runtime) {
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_VP_RUNTIME_AVAILABLE;
}
if (cpu->hyperv_synic) {
- int sint;
-
if (!has_msr_hv_synic ||
kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
return -ENOSYS;
}
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNIC_AVAILABLE;
- env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
- for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) {
- env->msr_hv_synic_sint[sint] = HV_SYNIC_SINT_MASKED;
- }
+ env->features[FEAT_HYPERV_EAX] |= HV_SYNIC_AVAILABLE;
}
if (cpu->hyperv_stimer) {
if (!has_msr_hv_stimer) {
fprintf(stderr, "Hyper-V timers aren't supported by kernel\n");
return -ENOSYS;
}
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNTIMER_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_SYNTIMERS_AVAILABLE;
}
return 0;
}
cpuid_i = 0;
+ r = kvm_arch_set_tsc_khz(cs);
+ if (r < 0) {
+ goto fail;
+ }
+
+    /* vcpu's TSC frequency is either specified by user, or, if absent,
+     * follows the value used by KVM. In the latter case, we query it
+     * from KVM and record it in env->tsc_khz, so that vcpu's TSC
+     * frequency can be migrated later via this field. This must run
+     * before hyperv_handle_properties(), which consults the final
+     * value through tsc_is_stable_and_known().
+     */
+ if (!env->tsc_khz) {
+ r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
+ kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
+ -ENOTSUP;
+ if (r > 0) {
+ env->tsc_khz = r;
+ }
+ }
+
/* Paravirtualization CPUIDs */
if (hyperv_enabled(cpu)) {
c = &cpuid_data.entries[cpuid_i++];
- c->function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
+ c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
if (!cpu->hyperv_vendor_id) {
memcpy(signature, "Microsoft Hv", 12);
} else {
memset(signature, 0, 12);
memcpy(signature, cpu->hyperv_vendor_id, len);
}
- c->eax = HYPERV_CPUID_MIN;
+ c->eax = HV_CPUID_MIN;
c->ebx = signature[0];
c->ecx = signature[1];
c->edx = signature[2];
c = &cpuid_data.entries[cpuid_i++];
- c->function = HYPERV_CPUID_INTERFACE;
+ c->function = HV_CPUID_INTERFACE;
memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
c->eax = signature[0];
c->ebx = 0;
c->edx = 0;
c = &cpuid_data.entries[cpuid_i++];
- c->function = HYPERV_CPUID_VERSION;
+ c->function = HV_CPUID_VERSION;
c->eax = 0x00001bbc;
c->ebx = 0x00060001;
c = &cpuid_data.entries[cpuid_i++];
- c->function = HYPERV_CPUID_FEATURES;
+ c->function = HV_CPUID_FEATURES;
r = hyperv_handle_properties(cs);
if (r) {
return r;
c->edx = env->features[FEAT_HYPERV_EDX];
c = &cpuid_data.entries[cpuid_i++];
- c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
+ c->function = HV_CPUID_ENLIGHTMENT_INFO;
if (cpu->hyperv_relaxed_timing) {
- c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
+ c->eax |= HV_RELAXED_TIMING_RECOMMENDED;
}
if (cpu->hyperv_vapic) {
- c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
+ c->eax |= HV_APIC_ACCESS_RECOMMENDED;
}
c->ebx = cpu->hyperv_spinlock_attempts;
c = &cpuid_data.entries[cpuid_i++];
- c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
- c->eax = 0x40;
+ c->function = HV_CPUID_IMPLEMENT_LIMITS;
+
+ c->eax = cpu->hv_max_vps;
c->ebx = 0x40;
kvm_base = KVM_CPUID_SIGNATURE_NEXT;
c = &cpuid_data.entries[cpuid_i++];
c->function = KVM_CPUID_FEATURES | kvm_base;
c->eax = env->features[FEAT_KVM];
+ c->edx = env->features[FEAT_KVM_HINTS];
}
cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
c = &cpuid_data.entries[cpuid_i++];
}
break;
+ case 0x14: {
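+        /* Intel PT enumeration: EAX of subleaf 0 is the maximum subleaf */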
+ uint32_t times;
+
+ c->function = i;
+ c->index = 0;
+ c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ times = c->eax;
+
+ for (j = 1; j <= times; ++j) {
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ fprintf(stderr, "cpuid_data is full, no space for "
+ "cpuid(eax:0x14,ecx:0x%x)\n", j);
+ abort();
+ }
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = i;
+ c->index = j;
+ c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ }
+ break;
+ }
default:
c->function = i;
c->flags = 0;
}
if (limit >= 0x0a) {
- uint32_t ver;
+ uint32_t eax, edx;
- cpu_x86_cpuid(env, 0x0a, 0, &ver, &unused, &unused, &unused);
- if ((ver & 0xff) > 0) {
- has_msr_architectural_pmu = true;
- num_architectural_pmu_counters = (ver & 0xff00) >> 8;
+ cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
+
+ has_architectural_pmu_version = eax & 0xff;
+ if (has_architectural_pmu_version > 0) {
+ num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
/* Shouldn't be more than 32, since that's the number of bits
* available in EBX to tell us _which_ counters are available.
* Play it safe.
*/
- if (num_architectural_pmu_counters > MAX_GP_COUNTERS) {
- num_architectural_pmu_counters = MAX_GP_COUNTERS;
+ if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
+ num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
+ }
+
+ if (has_architectural_pmu_version > 1) {
+ num_architectural_pmu_fixed_counters = edx & 0x1f;
+
+ if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
+ num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
+ }
}
}
}
error_report("kvm: LMCE not supported");
return -ENOTSUP;
}
- error_report("warning: Unsupported MCG_CAP bits: 0x%" PRIx64,
- unsupported_caps);
+ warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
+ unsupported_caps);
}
env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
}
}
- r = kvm_arch_set_tsc_khz(cs);
- if (r < 0) {
- goto fail;
- }
-
- /* vcpu's TSC frequency is either specified by user, or following
- * the value used by KVM if the former is not present. In the
- * latter case, we query it from KVM and record in env->tsc_khz,
- * so that vcpu's TSC frequency can be migrated later via this field.
- */
- if (!env->tsc_khz) {
- r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
- kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
- -ENOTSUP;
- if (r > 0) {
- env->tsc_khz = r;
- }
- }
-
if (cpu->vmware_cpuid_freq
/* Guests depend on 0x40000000 to detect this feature, so only expose
* it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
&& cpu->expose_kvm
&& kvm_base == KVM_CPUID_SIGNATURE
/* TSC clock must be stable and known for this feature. */
- && ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
- || env->user_tsc_khz != 0)
- && env->tsc_khz != 0) {
+ && tsc_is_stable_and_known(env)) {
c = &cpuid_data.entries[cpuid_i++];
c->function = KVM_CPUID_SIGNATURE | 0x10;
{
CPUX86State *env = &cpu->env;
- env->exception_injected = -1;
- env->interrupt_injected = -1;
env->xcr0 = 1;
if (kvm_irqchip_in_kernel()) {
env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
} else {
env->mp_state = KVM_MP_STATE_RUNNABLE;
}
+
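+    /* Mask all SynIC interrupt sources at reset; this used to be done
+     * once in hyperv_handle_properties(). */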
+ if (cpu->hyperv_synic) {
+ int i;
+ for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
+ env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
+ }
+ }
}
void kvm_arch_do_init_vcpu(X86CPU *cpu)
int i;
for (i = 0; i < kvm_msr_list->nmsrs; i++) {
- if (kvm_msr_list->indices[i] == MSR_STAR) {
+ switch (kvm_msr_list->indices[i]) {
+ case MSR_STAR:
has_msr_star = true;
- continue;
- }
- if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
+ break;
+ case MSR_VM_HSAVE_PA:
has_msr_hsave_pa = true;
- continue;
- }
- if (kvm_msr_list->indices[i] == MSR_TSC_AUX) {
+ break;
+ case MSR_TSC_AUX:
has_msr_tsc_aux = true;
- continue;
- }
- if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) {
+ break;
+ case MSR_TSC_ADJUST:
has_msr_tsc_adjust = true;
- continue;
- }
- if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
+ break;
+ case MSR_IA32_TSCDEADLINE:
has_msr_tsc_deadline = true;
- continue;
- }
- if (kvm_msr_list->indices[i] == MSR_IA32_SMBASE) {
+ break;
+ case MSR_IA32_SMBASE:
has_msr_smbase = true;
- continue;
- }
- if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
+ break;
+ case MSR_IA32_MISC_ENABLE:
has_msr_misc_enable = true;
- continue;
- }
- if (kvm_msr_list->indices[i] == MSR_IA32_BNDCFGS) {
+ break;
+ case MSR_IA32_BNDCFGS:
has_msr_bndcfgs = true;
- continue;
- }
- if (kvm_msr_list->indices[i] == MSR_IA32_XSS) {
+ break;
+ case MSR_IA32_XSS:
has_msr_xss = true;
- continue;
- }
- if (kvm_msr_list->indices[i] == HV_X64_MSR_CRASH_CTL) {
+ break;
+ case HV_X64_MSR_CRASH_CTL:
has_msr_hv_crash = true;
- continue;
- }
- if (kvm_msr_list->indices[i] == HV_X64_MSR_RESET) {
+ break;
+ case HV_X64_MSR_RESET:
has_msr_hv_reset = true;
- continue;
- }
- if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_INDEX) {
+ break;
+ case HV_X64_MSR_VP_INDEX:
has_msr_hv_vpindex = true;
- continue;
- }
- if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_RUNTIME) {
+ break;
+ case HV_X64_MSR_VP_RUNTIME:
has_msr_hv_runtime = true;
- continue;
- }
- if (kvm_msr_list->indices[i] == HV_X64_MSR_SCONTROL) {
+ break;
+ case HV_X64_MSR_SCONTROL:
has_msr_hv_synic = true;
- continue;
- }
- if (kvm_msr_list->indices[i] == HV_X64_MSR_STIMER0_CONFIG) {
+ break;
+ case HV_X64_MSR_STIMER0_CONFIG:
has_msr_hv_stimer = true;
- continue;
+ break;
+ case HV_X64_MSR_TSC_FREQUENCY:
+ has_msr_hv_frequencies = true;
+ break;
+ case MSR_IA32_SPEC_CTRL:
+ has_msr_spec_ctrl = true;
+ break;
}
}
}
}
}
- if (kvm_check_extension(s, KVM_CAP_X86_SMM)) {
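+    /* Register the SMRAM listener only for PC machines that enable SMM */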
+ if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
+ object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE) &&
+ pc_machine_is_smm_enabled(PC_MACHINE(ms))) {
smram_machine_done.notify = register_smram_listener;
qemu_add_machine_init_done_notifier(&smram_machine_done);
}
lhs->selector = rhs->selector;
lhs->base = rhs->base;
lhs->limit = rhs->limit;
- if (rhs->unusable) {
- lhs->flags = 0;
- } else {
- lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
- (rhs->present * DESC_P_MASK) |
- (rhs->dpl << DESC_DPL_SHIFT) |
- (rhs->db << DESC_B_SHIFT) |
- (rhs->s * DESC_S_MASK) |
- (rhs->l << DESC_L_SHIFT) |
- (rhs->g * DESC_G_MASK) |
- (rhs->avl * DESC_AVL_MASK);
- }
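+    /* KVM's "unusable" attribute now simply clears the present bit */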
+ lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
+ ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
+ (rhs->dpl << DESC_DPL_SHIFT) |
+ (rhs->db << DESC_B_SHIFT) |
+ (rhs->s * DESC_S_MASK) |
+ (rhs->l << DESC_L_SHIFT) |
+ (rhs->g * DESC_G_MASK) |
+ (rhs->avl * DESC_AVL_MASK);
}
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
CPUX86State *env = &cpu->env;
X86XSaveArea *xsave = env->kvm_xsave_buf;
- uint16_t cwd, swd, twd;
- int i;
if (!has_xsave) {
return kvm_put_fpu(cpu);
}
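+    /* Gather every XSAVE state component from env into the shared buffer */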
+ x86_cpu_xsave_all_areas(cpu, xsave);
- memset(xsave, 0, sizeof(struct kvm_xsave));
- twd = 0;
- swd = env->fpus & ~(7 << 11);
- swd |= (env->fpstt & 7) << 11;
- cwd = env->fpuc;
- for (i = 0; i < 8; ++i) {
- twd |= (!env->fptags[i]) << i;
- }
- xsave->legacy.fcw = cwd;
- xsave->legacy.fsw = swd;
- xsave->legacy.ftw = twd;
- xsave->legacy.fpop = env->fpop;
- xsave->legacy.fpip = env->fpip;
- xsave->legacy.fpdp = env->fpdp;
- memcpy(&xsave->legacy.fpregs, env->fpregs,
- sizeof env->fpregs);
- xsave->legacy.mxcsr = env->mxcsr;
- xsave->header.xstate_bv = env->xstate_bv;
- memcpy(&xsave->bndreg_state.bnd_regs, env->bnd_regs,
- sizeof env->bnd_regs);
- xsave->bndcsr_state.bndcsr = env->bndcs_regs;
- memcpy(&xsave->opmask_state.opmask_regs, env->opmask_regs,
- sizeof env->opmask_regs);
-
- for (i = 0; i < CPU_NB_REGS; i++) {
- uint8_t *xmm = xsave->legacy.xmm_regs[i];
- uint8_t *ymmh = xsave->avx_state.ymmh[i];
- uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
- stq_p(xmm, env->xmm_regs[i].ZMM_Q(0));
- stq_p(xmm+8, env->xmm_regs[i].ZMM_Q(1));
- stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
- stq_p(ymmh+8, env->xmm_regs[i].ZMM_Q(3));
- stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
- stq_p(zmmh+8, env->xmm_regs[i].ZMM_Q(5));
- stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
- stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
- }
-
-#ifdef TARGET_X86_64
- memcpy(&xsave->hi16_zmm_state.hi16_zmm, &env->xmm_regs[16],
- 16 * sizeof env->xmm_regs[16]);
- memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru);
-#endif
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
}
if (has_msr_xss) {
kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
}
+ if (has_msr_spec_ctrl) {
+ kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
+ }
#ifdef TARGET_X86_64
if (lm_capable_kernel) {
kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
}
#endif
+
/*
* The following MSRs have side effects on the guest or are too heavy
* for normal writeback. Limit them to reset or full state updates.
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
}
- if (has_msr_architectural_pmu) {
- /* Stop the counter. */
- kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
- kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ if (has_architectural_pmu_version > 0) {
+ if (has_architectural_pmu_version > 1) {
+ /* Stop the counter. */
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ }
/* Set the counter values. */
- for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
+ for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
env->msr_fixed_counters[i]);
}
- for (i = 0; i < num_architectural_pmu_counters; i++) {
+ for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
env->msr_gp_counters[i]);
kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
env->msr_gp_evtsel[i]);
}
- kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
- env->msr_global_status);
- kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
- env->msr_global_ovf_ctrl);
-
- /* Now start the PMU. */
- kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
- env->msr_fixed_ctr_ctrl);
- kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
- env->msr_global_ctrl);
- }
- if (has_msr_hv_hypercall) {
- kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
- env->msr_hv_guest_os_id);
- kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
- env->msr_hv_hypercall);
+ if (has_architectural_pmu_version > 1) {
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
+ env->msr_global_status);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
+ env->msr_global_ovf_ctrl);
+
+ /* Now start the PMU. */
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
+ env->msr_fixed_ctr_ctrl);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
+ env->msr_global_ctrl);
+ }
+ }
+ /*
+ * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
+ * only sync them to KVM on the first cpu
+ */
+ if (current_cpu == first_cpu) {
+ if (has_msr_hv_hypercall) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
+ env->msr_hv_guest_os_id);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
+ env->msr_hv_hypercall);
+ }
+ if (cpu->hyperv_time) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
+ env->msr_hv_tsc);
+ }
}
if (cpu->hyperv_vapic) {
kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
env->msr_hv_vapic);
}
- if (cpu->hyperv_time) {
- kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc);
- }
if (has_msr_hv_crash) {
int j;
- for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++)
+ for (j = 0; j < HV_CRASH_PARAMS; j++)
kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
env->msr_hv_crash_params[j]);
- kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL,
- HV_X64_MSR_CRASH_CTL_NOTIFY);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
}
if (has_msr_hv_runtime) {
kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
if (cpu->hyperv_synic) {
int j;
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);
+
kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
env->msr_hv_synic_control);
- kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION,
- env->msr_hv_synic_version);
kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
env->msr_hv_synic_evt_page);
kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
}
}
+ if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
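+        /* EAX[2:0] of CPUID 0x14, subleaf 1 bounds the RTIT_ADDR* MSRs */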
+ int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
+ 0x14, 1, R_EAX) & 0x7;
+
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
+ env->msr_rtit_ctrl);
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
+ env->msr_rtit_status);
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
+ env->msr_rtit_output_base);
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
+ env->msr_rtit_output_mask);
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
+ env->msr_rtit_cr3_match);
+ for (i = 0; i < addr_num; i++) {
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
+ env->msr_rtit_addrs[i]);
+ }
+ }
/* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
* kvm_put_msr_feature_control. */
return ret;
}
+ if (ret < cpu->kvm_msr_buf->nmsrs) {
+ struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
+ error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
+ (uint32_t)e->index, (uint64_t)e->data);
+ }
+
assert(ret == cpu->kvm_msr_buf->nmsrs);
return 0;
}
{
CPUX86State *env = &cpu->env;
X86XSaveArea *xsave = env->kvm_xsave_buf;
- int ret, i;
- uint16_t cwd, swd, twd;
+ int ret;
if (!has_xsave) {
return kvm_get_fpu(cpu);
if (ret < 0) {
return ret;
}
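+    /* Scatter the buffer filled by KVM_GET_XSAVE back into env */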
+ x86_cpu_xrstor_all_areas(cpu, xsave);
- cwd = xsave->legacy.fcw;
- swd = xsave->legacy.fsw;
- twd = xsave->legacy.ftw;
- env->fpop = xsave->legacy.fpop;
- env->fpstt = (swd >> 11) & 7;
- env->fpus = swd;
- env->fpuc = cwd;
- for (i = 0; i < 8; ++i) {
- env->fptags[i] = !((twd >> i) & 1);
- }
- env->fpip = xsave->legacy.fpip;
- env->fpdp = xsave->legacy.fpdp;
- env->mxcsr = xsave->legacy.mxcsr;
- memcpy(env->fpregs, &xsave->legacy.fpregs,
- sizeof env->fpregs);
- env->xstate_bv = xsave->header.xstate_bv;
- memcpy(env->bnd_regs, &xsave->bndreg_state.bnd_regs,
- sizeof env->bnd_regs);
- env->bndcs_regs = xsave->bndcsr_state.bndcsr;
- memcpy(env->opmask_regs, &xsave->opmask_state.opmask_regs,
- sizeof env->opmask_regs);
-
- for (i = 0; i < CPU_NB_REGS; i++) {
- uint8_t *xmm = xsave->legacy.xmm_regs[i];
- uint8_t *ymmh = xsave->avx_state.ymmh[i];
- uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
- env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
- env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8);
- env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
- env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8);
- env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
- env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
- env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
- env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
- }
-
-#ifdef TARGET_X86_64
- memcpy(&env->xmm_regs[16], &xsave->hi16_zmm_state.hi16_zmm,
- 16 * sizeof env->xmm_regs[16]);
- memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru);
-#endif
return 0;
}
{
CPUX86State *env = &cpu->env;
struct kvm_sregs sregs;
- uint32_t hflags;
int bit, i, ret;
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
env->efer = sregs.efer;
/* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
-
-#define HFLAG_COPY_MASK \
- ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
- HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
- HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
- HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
-
- hflags = env->hflags & HFLAG_COPY_MASK;
- hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
- hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
- hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
- (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
- hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
-
- if (env->cr[4] & CR4_OSFXSR_MASK) {
- hflags |= HF_OSFXSR_MASK;
- }
-
- if (env->efer & MSR_EFER_LMA) {
- hflags |= HF_LMA_MASK;
- }
-
- if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
- hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
- } else {
- hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
- (DESC_B_SHIFT - HF_CS32_SHIFT);
- hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
- (DESC_B_SHIFT - HF_SS32_SHIFT);
- if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
- !(hflags & HF_CS32_MASK)) {
- hflags |= HF_ADDSEG_MASK;
- } else {
- hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
- env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
- }
- }
- env->hflags = hflags;
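+    /* Recompute hflags from the segment and control state read above */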
+ x86_update_hflags(env);
return 0;
}
if (has_msr_xss) {
kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
}
+ if (has_msr_spec_ctrl) {
+ kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
+ }
if (!env->tsc_valid) {
if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
}
- if (has_msr_architectural_pmu) {
- kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
- kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
- kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
- kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
- for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
+ if (has_architectural_pmu_version > 0) {
+ if (has_architectural_pmu_version > 1) {
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
+ }
+ for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
}
- for (i = 0; i < num_architectural_pmu_counters; i++) {
+ for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
}
if (has_msr_hv_crash) {
int j;
- for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
+ for (j = 0; j < HV_CRASH_PARAMS; j++) {
kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
}
}
uint32_t msr;
kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
- kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, 0);
kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
}
}
+ if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
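+        /* Mirror of the Intel PT block in kvm_put_msrs() */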
+ int addr_num =
+ kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;
+
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
+ for (i = 0; i < addr_num; i++) {
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
+ }
+ }
+
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
if (ret < 0) {
return ret;
}
+ if (ret < cpu->kvm_msr_buf->nmsrs) {
+ struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
+ error_report("error: failed to get MSR 0x%" PRIx32,
+ (uint32_t)e->index);
+ }
+
assert(ret == cpu->kvm_msr_buf->nmsrs);
/*
* MTRR masks: Each mask consists of 5 parts
case HV_X64_MSR_SCONTROL:
env->msr_hv_synic_control = msrs[i].data;
break;
- case HV_X64_MSR_SVERSION:
- env->msr_hv_synic_version = msrs[i].data;
- break;
case HV_X64_MSR_SIEFP:
env->msr_hv_synic_evt_page = msrs[i].data;
break;
env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
}
break;
+ case MSR_IA32_SPEC_CTRL:
+ env->spec_ctrl = msrs[i].data;
+ break;
+ case MSR_IA32_RTIT_CTL:
+ env->msr_rtit_ctrl = msrs[i].data;
+ break;
+ case MSR_IA32_RTIT_STATUS:
+ env->msr_rtit_status = msrs[i].data;
+ break;
+ case MSR_IA32_RTIT_OUTPUT_BASE:
+ env->msr_rtit_output_base = msrs[i].data;
+ break;
+ case MSR_IA32_RTIT_OUTPUT_MASK:
+ env->msr_rtit_output_mask = msrs[i].data;
+ break;
+ case MSR_IA32_RTIT_CR3_MATCH:
+ env->msr_rtit_cr3_match = msrs[i].data;
+ break;
+ case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
+ env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
+ break;
}
}
events.smi.pending = 0;
events.smi.latched_init = 0;
}
- events.flags |= KVM_VCPUEVENT_VALID_SMM;
+    /* Stop SMI delivery on old machine types to avoid a reboot
+     * on an incoming migration of an old VM.
+ */
+ if (!cpu->kvm_no_smi_migration) {
+ events.flags |= KVM_VCPUEVENT_VALID_SMM;
+ }
}
if (level >= KVM_PUT_RESET_STATE) {
- events.flags |=
- KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
+ events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
+ if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
+ events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
+ }
}
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
if (ret < 0) {
return ret;
}
+ ret = kvm_put_vcpu_events(x86_cpu, level);
+ if (ret < 0) {
+ return ret;
+ }
if (level >= KVM_PUT_RESET_STATE) {
ret = kvm_put_mp_state(x86_cpu);
if (ret < 0) {
if (ret < 0) {
return ret;
}
-
- ret = kvm_put_vcpu_events(x86_cpu, level);
- if (ret < 0) {
- return ret;
- }
ret = kvm_put_debugregs(x86_cpu);
if (ret < 0) {
return ret;
assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
- ret = kvm_getput_regs(cpu, 0);
+ ret = kvm_get_vcpu_events(cpu);
if (ret < 0) {
goto out;
}
- ret = kvm_get_xsave(cpu);
+ /*
+     * KVM_GET_MP_STATE can modify CS and RIP; call it before
+ * KVM_GET_REGS and KVM_GET_SREGS.
+ */
+ ret = kvm_get_mp_state(cpu);
if (ret < 0) {
goto out;
}
- ret = kvm_get_xcrs(cpu);
+ ret = kvm_getput_regs(cpu, 0);
if (ret < 0) {
goto out;
}
- ret = kvm_get_sregs(cpu);
+ ret = kvm_get_xsave(cpu);
if (ret < 0) {
goto out;
}
- ret = kvm_get_msrs(cpu);
+ ret = kvm_get_xcrs(cpu);
if (ret < 0) {
goto out;
}
- ret = kvm_get_mp_state(cpu);
+ ret = kvm_get_sregs(cpu);
if (ret < 0) {
goto out;
}
- ret = kvm_get_apic(cpu);
+ ret = kvm_get_msrs(cpu);
if (ret < 0) {
goto out;
}
- ret = kvm_get_vcpu_events(cpu);
+ ret = kvm_get_apic(cpu);
if (ret < 0) {
goto out;
}
if (env->exception_injected == EXCP08_DBLE) {
/* this means triple fault */
- qemu_system_reset_request();
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
cs->exit_request = 1;
return 0;
}
int cnt = 0;
MSIRouteEntry *entry;
MSIMessage msg;
+ PCIDevice *dev;
+
/* TODO: explicit route update */
QLIST_FOREACH(entry, &msi_route_list, list) {
cnt++;
- msg = pci_get_msi_message(entry->dev, entry->vector);
- kvm_irqchip_update_msi_route(kvm_state, entry->virq,
- msg, entry->dev);
+ dev = entry->dev;
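+        /* Skip devices whose MSI/MSI-X is currently disabled: their
+         * message contents are not meaningful while the capability is off. */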
+ if (!msix_enabled(dev) && !msi_enabled(dev)) {
+ continue;
+ }
+ msg = pci_get_msi_message(dev, entry->vector);
+ kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
}
kvm_irqchip_commit_routes(kvm_state);
trace_kvm_x86_update_msi_routes(cnt);
if (entry->virq == virq) {
trace_kvm_x86_remove_msi_route(virq);
QLIST_REMOVE(entry, list);
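+            /* Also free the tracking entry itself, not just unlink it */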
+ g_free(entry);
break;
}
}