#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
+#include "sysemu/reset.h"
#include "kvm_i386.h"
#include "hyperv.h"
#include "hyperv-proto.h"
#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
+#include "qemu/main-loop.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "hw/i386/pc.h"
return (ret == KVM_CLOCK_TSC_STABLE);
}
+/*
+ * Report whether the file-scope has_exception_payload flag is set.
+ * NOTE(review): presumably this caches the result of enabling
+ * KVM_CAP_EXCEPTION_PAYLOAD during KVM init — confirm where the
+ * flag is assigned; this accessor only exposes it to other files.
+ */
+bool kvm_has_exception_payload(void)
+{
+    return has_exception_payload;
+}
+
bool kvm_allows_irq0_override(void)
{
return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
CPUX86State *env = &cpu->env;
uint32_t r, fw, bits;
uint64_t deps;
- int i, dep_feat = 0;
+ int i, dep_feat;
if (!hyperv_feat_enabled(cpu, feature) && !cpu->hyperv_passthrough) {
return 0;
}
deps = kvm_hyperv_properties[feature].dependencies;
- while ((dep_feat = find_next_bit(&deps, 64, dep_feat)) < 64) {
+ while (deps) {
+ dep_feat = ctz64(deps);
if (!(hyperv_feat_enabled(cpu, dep_feat))) {
fprintf(stderr,
"Hyper-V %s requires Hyper-V %s\n",
kvm_hyperv_properties[dep_feat].desc);
return 1;
}
- dep_feat++;
+ deps &= ~(1ull << dep_feat);
}
for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {
}
static Error *invtsc_mig_blocker;
-static Error *nested_virt_mig_blocker;
#define KVM_MAX_CPUID_ENTRIES 100
}
break;
}
+ case 0x1f:
+ if (env->nr_dies < 2) {
+ break;
+ }
case 4:
case 0xb:
case 0xd:
if (i == 0xd && j == 64) {
break;
}
+
+ if (i == 0x1f && j == 64) {
+ break;
+ }
+
c->function = i;
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
c->index = j;
if (i == 0xb && !(c->ecx & 0xff00)) {
break;
}
+ if (i == 0x1f && !(c->ecx & 0xff00)) {
+ break;
+ }
if (i == 0xd && c->eax == 0) {
continue;
}
!!(c->ecx & CPUID_EXT_SMX);
}
- if (cpu_has_vmx(env) && !nested_virt_mig_blocker &&
- ((kvm_max_nested_state_length() <= 0) || !has_exception_payload)) {
- error_setg(&nested_virt_mig_blocker,
- "Kernel do not provide required capabilities for "
- "nested virtualization migration. "
- "(CAP_NESTED_STATE=%d, CAP_EXCEPTION_PAYLOAD=%d)",
- kvm_max_nested_state_length() > 0,
- has_exception_payload);
- r = migrate_add_blocker(nested_virt_mig_blocker, &local_err);
- if (local_err) {
- error_report_err(local_err);
- error_free(nested_virt_mig_blocker);
- return r;
- }
- }
-
if (env->mcg_cap & MCG_LMCE_P) {
has_msr_mcg_ext_ctl = has_msr_feature_control = true;
}
if (local_err) {
error_report_err(local_err);
error_free(invtsc_mig_blocker);
- goto fail2;
+ return r;
}
}
}
max_nested_state_len = kvm_max_nested_state_length();
if (max_nested_state_len > 0) {
assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
- env->nested_state = g_malloc0(max_nested_state_len);
-
- env->nested_state->size = max_nested_state_len;
- if (IS_INTEL_CPU(env)) {
- struct kvm_vmx_nested_state_hdr *vmx_hdr =
- &env->nested_state->hdr.vmx;
+ if (cpu_has_vmx(env)) {
+ struct kvm_vmx_nested_state_hdr *vmx_hdr;
+ env->nested_state = g_malloc0(max_nested_state_len);
+ env->nested_state->size = max_nested_state_len;
env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
+
+ vmx_hdr = &env->nested_state->hdr.vmx;
vmx_hdr->vmxon_pa = -1ull;
vmx_hdr->vmcs12_pa = -1ull;
}
fail:
migrate_del_blocker(invtsc_mig_blocker);
- fail2:
- migrate_del_blocker(nested_virt_mig_blocker);
return r;
}
CPUX86State *env = &cpu->env;
int max_nested_state_len = kvm_max_nested_state_length();
- if (max_nested_state_len <= 0) {
+ if (!env->nested_state) {
return 0;
}
int max_nested_state_len = kvm_max_nested_state_length();
int ret;
- if (max_nested_state_len <= 0) {
+ if (!env->nested_state) {
return 0;
}
assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
- ret = kvm_put_nested_state(x86_cpu);
- if (ret < 0) {
- return ret;
- }
-
if (level >= KVM_PUT_RESET_STATE) {
+ ret = kvm_put_nested_state(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+
ret = kvm_put_msr_feature_control(x86_cpu);
if (ret < 0) {
return ret;