#include "hw/pc.h"
#include "hw/apic.h"
#include "ioport.h"
+#include "hyperv.h"
//#define DEBUG_KVM
static bool has_msr_star;
static bool has_msr_hsave_pa;
+static bool has_msr_tsc_deadline;
static bool has_msr_async_pf_en;
+static bool has_msr_misc_enable;
static int lm_capable_kernel;
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
if ((env->mcg_cap & MCG_SER_P) && addr
&& (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
if (qemu_ram_addr_from_host(addr, &ram_addr) ||
- !kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr,
- &paddr)) {
+ !kvm_physical_memory_addr_from_host(env->kvm_state, addr, &paddr)) {
fprintf(stderr, "Hardware memory error for memory used by "
"QEMU itself instead of guest system!\n");
/* Hope we are lucky for AO MCE */
/* Hope we are lucky for AO MCE */
if (qemu_ram_addr_from_host(addr, &ram_addr) ||
- !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr,
- &paddr)) {
+ !kvm_physical_memory_addr_from_host(first_cpu->kvm_state, addr,
+ &paddr)) {
fprintf(stderr, "Hardware memory error for memory used by "
"QEMU itself instead of guest system!: %p\n", addr);
return 0;
return 0;
}
-static void cpu_update_state(void *opaque, int running, int reason)
+static void cpu_update_state(void *opaque, int running, RunState state)
{
CPUState *env = opaque;
struct {
struct kvm_cpuid2 cpuid;
struct kvm_cpuid_entry2 entries[100];
- } __attribute__((packed)) cpuid_data;
+ } QEMU_PACKED cpuid_data;
KVMState *s = env->kvm_state;
uint32_t limit, i, j, cpuid_i;
uint32_t unused;
cpuid_i = 0;
/* Paravirtualization CPUIDs */
- memcpy(signature, "KVMKVMKVM\0\0\0", 12);
c = &cpuid_data.entries[cpuid_i++];
memset(c, 0, sizeof(*c));
c->function = KVM_CPUID_SIGNATURE;
- c->eax = 0;
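+ /* leaf 0x40000000: hypervisor signature; with Hyper-V enabled, eax reports
+ * the top Hyper-V leaf instead of 0 */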
+ if (!hyperv_enabled()) {
+ memcpy(signature, "KVMKVMKVM\0\0\0", 12);
+ c->eax = 0;
+ } else {
+ memcpy(signature, "Microsoft Hv", 12);
+ c->eax = HYPERV_CPUID_MIN;
+ }
c->ebx = signature[0];
c->ecx = signature[1];
c->edx = signature[2];
c->eax = env->cpuid_kvm_features &
kvm_arch_get_supported_cpuid(s, KVM_CPUID_FEATURES, 0, R_EAX);
has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);
+ if (hyperv_enabled()) {
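+ /* KVM_CPUID_FEATURES and HYPERV_CPUID_INTERFACE share the value 0x40000001,
+ * so this entry doubles as the Hyper-V interface leaf ("Hv#1") */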
+ memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
+ c->eax = signature[0];
+
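+ /* leaf 0x40000002: Hyper-V version; eax holds the build number
+ * (0x1bbc = 7100), ebx the major/minor version 6.1 */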
+ c = &cpuid_data.entries[cpuid_i++];
+ memset(c, 0, sizeof(*c));
+ c->function = HYPERV_CPUID_VERSION;
+ c->eax = 0x00001bbc;
+ c->ebx = 0x00060001;
+
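+ /* leaf 0x40000003: features; eax advertises which synthetic MSRs the
+ * guest may use */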
+ c = &cpuid_data.entries[cpuid_i++];
+ memset(c, 0, sizeof(*c));
+ c->function = HYPERV_CPUID_FEATURES;
+ if (hyperv_relaxed_timing_enabled()) {
+ c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
+ }
+ if (hyperv_vapic_recommended()) {
+ c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
+ c->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
+ }
+
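+ /* leaf 0x40000004: enlightenment recommendations; ebx is the spinlock
+ * retry count before the guest should notify the hypervisor */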
+ c = &cpuid_data.entries[cpuid_i++];
+ memset(c, 0, sizeof(*c));
+ c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
+ if (hyperv_relaxed_timing_enabled()) {
+ c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
+ }
+ if (hyperv_vapic_recommended()) {
+ c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
+ }
+ c->ebx = hyperv_get_spinlock_retries();
+
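+ /* leaf 0x40000005: implementation limits; report up to 64 virtual and
+ * 64 logical processors */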
+ c = &cpuid_data.entries[cpuid_i++];
+ memset(c, 0, sizeof(*c));
+ c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
+ c->eax = 0x40;
+ c->ebx = 0x40;
+
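+ /* with Hyper-V occupying 0x40000000, relocate the KVM signature to
+ * KVM_CPUID_SIGNATURE_NEXT (0x40000100) so Linux guests can still find it */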
+ c = &cpuid_data.entries[cpuid_i++];
+ memset(c, 0, sizeof(*c));
+ c->function = KVM_CPUID_SIGNATURE_NEXT;
+ memcpy(signature, "KVMKVMKVM\0\0\0", 12);
+ c->eax = 0;
+ c->ebx = signature[0];
+ c->ecx = signature[1];
+ c->edx = signature[2];
+ }
+
cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
}
}
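+ /* allocate the page-aligned xsave buffer once; kvm_put_xsave() and
+ * kvm_get_xsave() reuse it on every state sync */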
+ if (kvm_has_xsave()) {
+ env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
+ }
+
return 0;
}
has_msr_hsave_pa = true;
continue;
}
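+ /* record which optional MSRs the kernel supports so the save/restore
+ * paths only touch those */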
+ if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
+ has_msr_tsc_deadline = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
+ has_msr_misc_enable = true;
+ continue;
+ }
}
}
int kvm_arch_init(KVMState *s)
{
+ QemuOptsList *list = qemu_find_opts("machine");
uint64_t identity_base = 0xfffbc000;
+ uint64_t shadow_mem;
int ret;
struct utsname utsname;
}
qemu_register_reset(kvm_unpoison_all, NULL);
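+ /* honor the -machine kvm_shadow_mem=<bytes> option; KVM_SET_NR_MMU_PAGES
+ * takes the limit in 4 KiB pages */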
+ if (!QTAILQ_EMPTY(&list->head)) {
+ shadow_mem = qemu_opt_get_size(QTAILQ_FIRST(&list->head),
+ "kvm_shadow_mem", -1);
+ if (shadow_mem != -1) {
+ shadow_mem /= 4096;
+ ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ }
return 0;
}
return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}
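+/* offsets into the uint32_t region[] of struct kvm_xsave */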
+#define XSAVE_FCW_FSW 0
+#define XSAVE_FTW_FOP 1
#define XSAVE_CWD_RIP 2
#define XSAVE_CWD_RDP 4
#define XSAVE_MXCSR 6
static int kvm_put_xsave(CPUState *env)
{
- int i, r;
- struct kvm_xsave* xsave;
+ struct kvm_xsave* xsave = env->kvm_xsave_buf;
uint16_t cwd, swd, twd;
+ int i, r;
if (!kvm_has_xsave()) {
return kvm_put_fpu(env);
}
- xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
memset(xsave, 0, sizeof(struct kvm_xsave));
- cwd = swd = twd = 0;
+ twd = 0;
swd = env->fpus & ~(7 << 11);
swd |= (env->fpstt & 7) << 11;
cwd = env->fpuc;
for (i = 0; i < 8; ++i) {
twd |= (!env->fptags[i]) << i;
}
- xsave->region[0] = (uint32_t)(swd << 16) + cwd;
- xsave->region[1] = (uint32_t)(env->fpop << 16) + twd;
+ xsave->region[XSAVE_FCW_FSW] = (uint32_t)(swd << 16) + cwd;
+ xsave->region[XSAVE_FTW_FOP] = (uint32_t)(env->fpop << 16) + twd;
memcpy(&xsave->region[XSAVE_CWD_RIP], &env->fpip, sizeof(env->fpip));
memcpy(&xsave->region[XSAVE_CWD_RDP], &env->fpdp, sizeof(env->fpdp));
memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
sizeof env->ymmh_regs);
r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
- g_free(xsave);
return r;
}
if (has_msr_hsave_pa) {
kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
}
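+ /* optional MSRs, written only when the kernel listed them at probe time */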
+ if (has_msr_tsc_deadline) {
+ kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
+ }
+ if (has_msr_misc_enable) {
+ kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
+ env->msr_ia32_misc_enable);
+ }
#ifdef TARGET_X86_64
if (lm_capable_kernel) {
kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
env->async_pf_en_msr);
}
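+ /* reset the Hyper-V MSRs to 0 so stale hypercall and APIC assist pages
+ * are disarmed */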
+ if (hyperv_hypercall_available()) {
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_GUEST_OS_ID, 0);
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_HYPERCALL, 0);
+ }
+ if (hyperv_vapic_recommended()) {
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE, 0);
+ }
}
if (env->mcg_cap) {
int i;
static int kvm_get_xsave(CPUState *env)
{
- struct kvm_xsave* xsave;
+ struct kvm_xsave* xsave = env->kvm_xsave_buf;
int ret, i;
uint16_t cwd, swd, twd;
return kvm_get_fpu(env);
}
- xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
if (ret < 0) {
- g_free(xsave);
return ret;
}
- cwd = (uint16_t)xsave->region[0];
- swd = (uint16_t)(xsave->region[0] >> 16);
- twd = (uint16_t)xsave->region[1];
- env->fpop = (uint16_t)(xsave->region[1] >> 16);
+ cwd = (uint16_t)xsave->region[XSAVE_FCW_FSW];
+ swd = (uint16_t)(xsave->region[XSAVE_FCW_FSW] >> 16);
+ twd = (uint16_t)xsave->region[XSAVE_FTW_FOP];
+ env->fpop = (uint16_t)(xsave->region[XSAVE_FTW_FOP] >> 16);
env->fpstt = (swd >> 11) & 7;
env->fpus = swd;
env->fpuc = cwd;
env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
sizeof env->ymmh_regs);
- g_free(xsave);
return 0;
}
env->cr[3] = sregs.cr3;
env->cr[4] = sregs.cr4;
- cpu_set_apic_base(env->apic_state, sregs.apic_base);
-
env->efer = sregs.efer;
- //cpu_set_apic_tpr(env->apic_state, sregs.cr8);
+
+ /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
#define HFLAG_COPY_MASK \
~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
if (has_msr_hsave_pa) {
msrs[n++].index = MSR_VM_HSAVE_PA;
}
+ if (has_msr_tsc_deadline) {
+ msrs[n++].index = MSR_IA32_TSCDEADLINE;
+ }
+ if (has_msr_misc_enable) {
+ msrs[n++].index = MSR_IA32_MISC_ENABLE;
+ }
if (!env->tsc_valid) {
msrs[n++].index = MSR_IA32_TSC;
- env->tsc_valid = !vm_running;
+ env->tsc_valid = !runstate_is_running();
}
#ifdef TARGET_X86_64
case MSR_IA32_TSC:
env->tsc = msrs[i].data;
break;
+ case MSR_IA32_TSCDEADLINE:
+ env->tsc_deadline = msrs[i].data;
+ break;
case MSR_VM_HSAVE_PA:
env->vm_hsave = msrs[i].data;
break;
case MSR_MCG_CTL:
env->mcg_ctl = msrs[i].data;
break;
+ case MSR_IA32_MISC_ENABLE:
+ env->msr_ia32_misc_enable = msrs[i].data;
+ break;
default:
if (msrs[i].index >= MSR_MC0_CTL &&
msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
return 0;
}
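+/* fetch the in-kernel LAPIC state and decode it into the userspace APIC
+ * device model */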
+static int kvm_get_apic(CPUState *env)
+{
+ DeviceState *apic = env->apic_state;
+ struct kvm_lapic_state kapic;
+ int ret;
+
+ if (apic && kvm_irqchip_in_kernel()) {
+ ret = kvm_vcpu_ioctl(env, KVM_GET_LAPIC, &kapic);
+ if (ret < 0) {
+ return ret;
+ }
+
+ kvm_get_apic_state(apic, &kapic);
+ }
+ return 0;
+}
+
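+/* encode the userspace APIC device model state and load it into the
+ * in-kernel LAPIC */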
+static int kvm_put_apic(CPUState *env)
+{
+ DeviceState *apic = env->apic_state;
+ struct kvm_lapic_state kapic;
+
+ if (apic && kvm_irqchip_in_kernel()) {
+ kvm_put_apic_state(apic, &kapic);
+
+ return kvm_vcpu_ioctl(env, KVM_SET_LAPIC, &kapic);
+ }
+ return 0;
+}
+
static int kvm_put_vcpu_events(CPUState *env, int level)
{
struct kvm_vcpu_events events;
if (ret < 0) {
return ret;
}
+ ret = kvm_put_apic(env);
+ if (ret < 0) {
+ return ret;
+ }
}
ret = kvm_put_vcpu_events(env, level);
if (ret < 0) {
if (ret < 0) {
return ret;
}
+ ret = kvm_get_apic(env);
+ if (ret < 0) {
+ return ret;
+ }
ret = kvm_get_vcpu_events(env);
if (ret < 0) {
return ret;
code);
if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
fprintf(stderr,
- "\nIf you're runnning a guest on an Intel machine without "
+ "\nIf you're running a guest on an Intel machine without "
"unrestricted mode\n"
"support, the failure can be most likely due to the guest "
"entering an invalid\n"
return !(env->cr[0] & CR0_PE_MASK) ||
((env->segs[R_CS].selector & 3) != 3);
}
+
+void kvm_arch_init_irq_routing(KVMState *s)
+{
+ if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
+ /* If kernel can't do irq routing, interrupt source
+ * override 0->2 cannot be set up as required by HPET.
+ * So we have to disable it.
+ */
+ no_hpet = 1;
+ }
+}