4 * Copyright (C) 2006-2008 Qumranet Technologies
5 * Copyright IBM, Corp. 2008
10 * This work is licensed under the terms of the GNU GPL, version 2 or later.
11 * See the COPYING file in the top-level directory.
15 #include "qemu/osdep.h"
16 #include "qapi/error.h"
17 #include <sys/ioctl.h>
18 #include <sys/utsname.h>
20 #include <linux/kvm.h>
21 #include <linux/kvm_para.h>
23 #include "qemu-common.h"
25 #include "sysemu/sysemu.h"
26 #include "sysemu/hw_accel.h"
27 #include "sysemu/kvm_int.h"
30 #include "hyperv-proto.h"
32 #include "exec/gdbstub.h"
33 #include "qemu/host-utils.h"
34 #include "qemu/config-file.h"
35 #include "qemu/error-report.h"
36 #include "hw/i386/pc.h"
37 #include "hw/i386/apic.h"
38 #include "hw/i386/apic_internal.h"
39 #include "hw/i386/apic-msidef.h"
40 #include "hw/i386/intel_iommu.h"
41 #include "hw/i386/x86-iommu.h"
43 #include "exec/ioport.h"
44 #include "hw/pci/pci.h"
45 #include "hw/pci/msi.h"
46 #include "hw/pci/msix.h"
47 #include "migration/blocker.h"
48 #include "exec/memattrs.h"
54 #define DPRINTF(fmt, ...) \
55 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
57 #define DPRINTF(fmt, ...) \
61 #define MSR_KVM_WALL_CLOCK 0x11
62 #define MSR_KVM_SYSTEM_TIME 0x12
64 /* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
65 * 255 kvm_msr_entry structs */
66 #define MSR_BUF_SIZE 4096
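/* The arithmetic behind MSR_BUF_SIZE: the kvm_msrs header is 8 bytes
 * (nmsrs plus padding) and each kvm_msr_entry is 16 bytes, so
 * (4096 - 8) / 16 = 255 entries fit in one buffer.
 */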
68 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
69 KVM_CAP_INFO(SET_TSS_ADDR),
70 KVM_CAP_INFO(EXT_CPUID),
71 KVM_CAP_INFO(MP_STATE),
75 static bool has_msr_star;
76 static bool has_msr_hsave_pa;
77 static bool has_msr_tsc_aux;
78 static bool has_msr_tsc_adjust;
79 static bool has_msr_tsc_deadline;
80 static bool has_msr_feature_control;
81 static bool has_msr_misc_enable;
82 static bool has_msr_smbase;
83 static bool has_msr_bndcfgs;
84 static int lm_capable_kernel;
85 static bool has_msr_hv_hypercall;
86 static bool has_msr_hv_crash;
87 static bool has_msr_hv_reset;
88 static bool has_msr_hv_vpindex;
89 static bool has_msr_hv_runtime;
90 static bool has_msr_hv_synic;
91 static bool has_msr_hv_stimer;
92 static bool has_msr_hv_frequencies;
93 static bool has_msr_xss;
95 static uint32_t has_architectural_pmu_version;
96 static uint32_t num_architectural_pmu_gp_counters;
97 static uint32_t num_architectural_pmu_fixed_counters;
101 static int has_pit_state2;
103 static bool has_msr_mcg_ext_ctl;
105 static struct kvm_cpuid2 *cpuid_cache;
107 int kvm_has_pit_state2(void)
109 return has_pit_state2;
112 bool kvm_has_smm(void)
114 return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
117 bool kvm_has_adjust_clock_stable(void)
119 int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);
121 return (ret == KVM_CLOCK_TSC_STABLE);
124 bool kvm_allows_irq0_override(void)
126 return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
129 static bool kvm_x2apic_api_set_flags(uint64_t flags)
131 KVMState *s = KVM_STATE(current_machine->accelerator);
133 return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
136 #define MEMORIZE(fn, _result) \
138 static bool _memorized; \
147 static bool has_x2apic_api;
149 bool kvm_has_x2apic_api(void)
151 return has_x2apic_api;
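/* Set the x2APIC API flags once and memoize the result (see the
 * MEMORIZE macro above); the outcome is not expected to change at
 * runtime.
 */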
154 bool kvm_enable_x2apic(void)
157 kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
158 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
162 static int kvm_get_tsc(CPUState *cs)
164 X86CPU *cpu = X86_CPU(cs);
165 CPUX86State *env = &cpu->env;
167 struct kvm_msrs info;
168 struct kvm_msr_entry entries[1];
172 if (env->tsc_valid) {
176 msr_data.info.nmsrs = 1;
177 msr_data.entries[0].index = MSR_IA32_TSC;
178 env->tsc_valid = !runstate_is_running();
180 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
186 env->tsc = msr_data.entries[0].data;
190 static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
195 void kvm_synchronize_all_tsc(void)
201 run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
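/* Ask KVM_GET_SUPPORTED_CPUID for up to 'max' entries; returns NULL
 * when the buffer is too small, so the caller can retry with a larger
 * 'max' (see get_supported_cpuid() below).
 */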
206 static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
208 struct kvm_cpuid2 *cpuid;
211 size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
212 cpuid = g_malloc0(size);
214 r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
215 if (r == 0 && cpuid->nent >= max) {
223 fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
231 /* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
234 static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
236 struct kvm_cpuid2 *cpuid;
239 if (cpuid_cache != NULL) {
242 while ((cpuid = try_get_cpuid(s, max)) == NULL) {
249 static const struct kvm_para_features {
252 } para_features[] = {
253 { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
254 { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
255 { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
256 { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
259 static int get_para_features(KVMState *s)
263 for (i = 0; i < ARRAY_SIZE(para_features); i++) {
264 if (kvm_check_extension(s, para_features[i].cap)) {
265 features |= (1 << para_features[i].feature);
272 static bool host_tsx_blacklisted(void)
274 int family, model, stepping;
275 char vendor[CPUID_VENDOR_SZ + 1];
277 host_vendor_fms(vendor, &family, &model, &stepping);
279 /* Check if we are running on a Haswell host known to have broken TSX */
280 return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
282 ((model == 63 && stepping < 4) ||
283 model == 60 || model == 69 || model == 70);
286 /* Returns the value for a specific register on the cpuid entry
288 static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
308 /* Find matching entry for function/index on kvm_cpuid2 struct
310 static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
315 for (i = 0; i < cpuid->nent; ++i) {
316 if (cpuid->entries[i].function == function &&
317 cpuid->entries[i].index == index) {
318 return &cpuid->entries[i];
325 uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
326 uint32_t index, int reg)
328 struct kvm_cpuid2 *cpuid;
330 uint32_t cpuid_1_edx;
333 cpuid = get_supported_cpuid(s);
335 struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
338 ret = cpuid_entry_get_reg(entry, reg);
341 /* Fixups for the data returned by KVM, below */
343 if (function == 1 && reg == R_EDX) {
344 /* KVM before 2.6.30 misreports the following features */
345 ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
346 } else if (function == 1 && reg == R_ECX) {
347 /* We can set the hypervisor flag, even if KVM does not return it on
348 * GET_SUPPORTED_CPUID
350 ret |= CPUID_EXT_HYPERVISOR;
351 /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
352 * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
353 * and the irqchip is in the kernel.
355 if (kvm_irqchip_in_kernel() &&
356 kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
357 ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
360 /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
361 * without the in-kernel irqchip
363 if (!kvm_irqchip_in_kernel()) {
364 ret &= ~CPUID_EXT_X2APIC;
366 } else if (function == 6 && reg == R_EAX) {
367 ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
368 } else if (function == 7 && index == 0 && reg == R_EBX) {
369 if (host_tsx_blacklisted()) {
370 ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
372 } else if (function == 0x80000001 && reg == R_EDX) {
373 /* On Intel, kvm returns cpuid according to the Intel spec,
374 * so add missing bits according to the AMD spec:
376 cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
377 ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
378 } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
379 /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
380 * be enabled without the in-kernel irqchip
382 if (!kvm_irqchip_in_kernel()) {
383 ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
387 /* fallback for older kernels */
388 if ((function == KVM_CPUID_FEATURES) && !found) {
389 ret = get_para_features(s);
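/* Track guest pages that the host kernel reported as poisoned
 * (hardware memory errors), so they can be unpoisoned (remapped)
 * again on VM reset.
 */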
395 typedef struct HWPoisonPage {
397 QLIST_ENTRY(HWPoisonPage) list;
400 static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
401 QLIST_HEAD_INITIALIZER(hwpoison_page_list);
403 static void kvm_unpoison_all(void *param)
405 HWPoisonPage *page, *next_page;
407 QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
408 QLIST_REMOVE(page, list);
409 qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
414 static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
418 QLIST_FOREACH(page, &hwpoison_page_list, list) {
419 if (page->ram_addr == ram_addr) {
423 page = g_new(HWPoisonPage, 1);
424 page->ram_addr = ram_addr;
425 QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
428 static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
433 r = kvm_check_extension(s, KVM_CAP_MCE);
436 return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
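/* Inject a machine check for the memory error at 'paddr', building
 * architectural MCi_STATUS/MCG_STATUS values; the event is broadcast
 * to all vCPUs when the CPU model supports MCA broadcast.
 */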
441 static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
443 CPUState *cs = CPU(cpu);
444 CPUX86State *env = &cpu->env;
445 uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
446 MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
447 uint64_t mcg_status = MCG_STATUS_MCIP;
450 if (code == BUS_MCEERR_AR) {
451 status |= MCI_STATUS_AR | 0x134;
452 mcg_status |= MCG_STATUS_EIPV;
455 mcg_status |= MCG_STATUS_RIPV;
458 flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
459 /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
460 * guest kernel into env->mcg_ext_ctl.
462 cpu_synchronize_state(cs);
463 if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
464 mcg_status |= MCG_STATUS_LMCE;
468 cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
469 (MCM_ADDR_PHYS << 6) | 0xc, flags);
472 static void hardware_memory_error(void)
474 fprintf(stderr, "Hardware memory error!\n");
478 void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
480 X86CPU *cpu = X86_CPU(c);
481 CPUX86State *env = &cpu->env;
485 /* If we get an action required MCE, it has been injected by KVM
486 * while the VM was running. An action optional MCE instead should
487 * be coming from the main thread, which qemu_init_sigbus identifies
488 * as the "early kill" thread.
490 assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);
492 if ((env->mcg_cap & MCG_SER_P) && addr) {
493 ram_addr = qemu_ram_addr_from_host(addr);
494 if (ram_addr != RAM_ADDR_INVALID &&
495 kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
496 kvm_hwpoison_page_add(ram_addr);
497 kvm_mce_inject(cpu, paddr, code);
501 fprintf(stderr, "Hardware memory error for memory used by "
502 "QEMU itself instead of guest system!\n");
505 if (code == BUS_MCEERR_AR) {
506 hardware_memory_error();
509 /* Hope we are lucky for AO MCE */
512 static int kvm_inject_mce_oldstyle(X86CPU *cpu)
514 CPUX86State *env = &cpu->env;
516 if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
517 unsigned int bank, bank_num = env->mcg_cap & 0xff;
518 struct kvm_x86_mce mce;
520 env->exception_injected = -1;
523 * There must be at least one bank in use if an MCE is pending.
524 * Find it and use its values for the event injection.
526 for (bank = 0; bank < bank_num; bank++) {
527 if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
531 assert(bank < bank_num);
534 mce.status = env->mce_banks[bank * 4 + 1];
535 mce.mcg_status = env->mcg_status;
536 mce.addr = env->mce_banks[bank * 4 + 2];
537 mce.misc = env->mce_banks[bank * 4 + 3];
539 return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
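/* VM run-state change handler: a cached TSC value is only trustworthy
 * while the VM is stopped, so invalidate it when the VM starts running.
 */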
544 static void cpu_update_state(void *opaque, int running, RunState state)
546 CPUX86State *env = opaque;
549 env->tsc_valid = false;
553 unsigned long kvm_arch_vcpu_id(CPUState *cs)
555 X86CPU *cpu = X86_CPU(cs);
559 #ifndef KVM_CPUID_SIGNATURE_NEXT
560 #define KVM_CPUID_SIGNATURE_NEXT 0x40000100
563 static bool hyperv_hypercall_available(X86CPU *cpu)
565 return cpu->hyperv_vapic ||
566 (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY);
569 static bool hyperv_enabled(X86CPU *cpu)
571 CPUState *cs = CPU(cpu);
572 return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
573 (hyperv_hypercall_available(cpu) ||
575 cpu->hyperv_relaxed_timing ||
578 cpu->hyperv_vpindex ||
579 cpu->hyperv_runtime ||
584 static int kvm_arch_set_tsc_khz(CPUState *cs)
586 X86CPU *cpu = X86_CPU(cs);
587 CPUX86State *env = &cpu->env;
594 r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ?
595 kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
598 /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
599 * TSC frequency doesn't match the one we want.
601 int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
602 kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
604 if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
605 warn_report("TSC frequency mismatch between "
606 "VM (%" PRId64 " kHz) and host (%d kHz), "
607 "and TSC scaling unavailable",
608 env->tsc_khz, cur_freq);
616 static bool tsc_is_stable_and_known(CPUX86State *env)
621 return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
622 || env->user_tsc_khz;
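/* Convert the cpu->hyperv_* properties into HV_* feature bits in
 * env->features[FEAT_HYPERV_EAX/EDX], failing or silently dropping
 * features that the kernel does not support.
 */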
625 static int hyperv_handle_properties(CPUState *cs)
627 X86CPU *cpu = X86_CPU(cs);
628 CPUX86State *env = &cpu->env;
630 if (cpu->hyperv_time &&
631 kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) <= 0) {
632 cpu->hyperv_time = false;
635 if (cpu->hyperv_relaxed_timing) {
636 env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
638 if (cpu->hyperv_vapic) {
639 env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
640 env->features[FEAT_HYPERV_EAX] |= HV_APIC_ACCESS_AVAILABLE;
642 if (cpu->hyperv_time) {
643 env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
644 env->features[FEAT_HYPERV_EAX] |= HV_TIME_REF_COUNT_AVAILABLE;
645 env->features[FEAT_HYPERV_EAX] |= HV_REFERENCE_TSC_AVAILABLE;
647 if (has_msr_hv_frequencies && tsc_is_stable_and_known(env)) {
648 env->features[FEAT_HYPERV_EAX] |= HV_ACCESS_FREQUENCY_MSRS;
649 env->features[FEAT_HYPERV_EDX] |= HV_FREQUENCY_MSRS_AVAILABLE;
652 if (cpu->hyperv_crash && has_msr_hv_crash) {
653 env->features[FEAT_HYPERV_EDX] |= HV_GUEST_CRASH_MSR_AVAILABLE;
655 env->features[FEAT_HYPERV_EDX] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
656 if (cpu->hyperv_reset && has_msr_hv_reset) {
657 env->features[FEAT_HYPERV_EAX] |= HV_RESET_AVAILABLE;
659 if (cpu->hyperv_vpindex && has_msr_hv_vpindex) {
660 env->features[FEAT_HYPERV_EAX] |= HV_VP_INDEX_AVAILABLE;
662 if (cpu->hyperv_runtime && has_msr_hv_runtime) {
663 env->features[FEAT_HYPERV_EAX] |= HV_VP_RUNTIME_AVAILABLE;
665 if (cpu->hyperv_synic) {
666 if (!has_msr_hv_synic ||
667 kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
668 fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
672 env->features[FEAT_HYPERV_EAX] |= HV_SYNIC_AVAILABLE;
674 if (cpu->hyperv_stimer) {
675 if (!has_msr_hv_stimer) {
676 fprintf(stderr, "Hyper-V timers aren't supported by kernel\n");
679 env->features[FEAT_HYPERV_EAX] |= HV_SYNTIMERS_AVAILABLE;
684 static Error *invtsc_mig_blocker;
686 #define KVM_MAX_CPUID_ENTRIES 100
688 int kvm_arch_init_vcpu(CPUState *cs)
691 struct kvm_cpuid2 cpuid;
692 struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
693 } QEMU_PACKED cpuid_data;
694 X86CPU *cpu = X86_CPU(cs);
695 CPUX86State *env = &cpu->env;
696 uint32_t limit, i, j, cpuid_i;
698 struct kvm_cpuid_entry2 *c;
699 uint32_t signature[3];
700 int kvm_base = KVM_CPUID_SIGNATURE;
702 Error *local_err = NULL;
704 memset(&cpuid_data, 0, sizeof(cpuid_data));
708 r = kvm_arch_set_tsc_khz(cs);
713 /* vcpu's TSC frequency is either specified by the user, or it follows
714 * the value used by KVM if the former is not present. In the
715 * latter case, we query it from KVM and record it in env->tsc_khz,
716 * so that the vcpu's TSC frequency can be migrated later via this field.
719 r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
720 kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
727 /* Paravirtualization CPUIDs */
728 if (hyperv_enabled(cpu)) {
729 c = &cpuid_data.entries[cpuid_i++];
730 c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
731 if (!cpu->hyperv_vendor_id) {
732 memcpy(signature, "Microsoft Hv", 12);
734 size_t len = strlen(cpu->hyperv_vendor_id);
737 error_report("hv-vendor-id truncated to 12 characters");
740 memset(signature, 0, 12);
741 memcpy(signature, cpu->hyperv_vendor_id, len);
743 c->eax = HV_CPUID_MIN;
744 c->ebx = signature[0];
745 c->ecx = signature[1];
746 c->edx = signature[2];
748 c = &cpuid_data.entries[cpuid_i++];
749 c->function = HV_CPUID_INTERFACE;
750 memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
751 c->eax = signature[0];
756 c = &cpuid_data.entries[cpuid_i++];
757 c->function = HV_CPUID_VERSION;
761 c = &cpuid_data.entries[cpuid_i++];
762 c->function = HV_CPUID_FEATURES;
763 r = hyperv_handle_properties(cs);
767 c->eax = env->features[FEAT_HYPERV_EAX];
768 c->ebx = env->features[FEAT_HYPERV_EBX];
769 c->edx = env->features[FEAT_HYPERV_EDX];
771 c = &cpuid_data.entries[cpuid_i++];
772 c->function = HV_CPUID_ENLIGHTMENT_INFO;
773 if (cpu->hyperv_relaxed_timing) {
774 c->eax |= HV_RELAXED_TIMING_RECOMMENDED;
776 if (cpu->hyperv_vapic) {
777 c->eax |= HV_APIC_ACCESS_RECOMMENDED;
779 c->ebx = cpu->hyperv_spinlock_attempts;
781 c = &cpuid_data.entries[cpuid_i++];
782 c->function = HV_CPUID_IMPLEMENT_LIMITS;
784 c->eax = cpu->hv_max_vps;
787 kvm_base = KVM_CPUID_SIGNATURE_NEXT;
788 has_msr_hv_hypercall = true;
791 if (cpu->expose_kvm) {
792 memcpy(signature, "KVMKVMKVM\0\0\0", 12);
793 c = &cpuid_data.entries[cpuid_i++];
794 c->function = KVM_CPUID_SIGNATURE | kvm_base;
795 c->eax = KVM_CPUID_FEATURES | kvm_base;
796 c->ebx = signature[0];
797 c->ecx = signature[1];
798 c->edx = signature[2];
800 c = &cpuid_data.entries[cpuid_i++];
801 c->function = KVM_CPUID_FEATURES | kvm_base;
802 c->eax = env->features[FEAT_KVM];
805 cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
807 for (i = 0; i <= limit; i++) {
808 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
809 fprintf(stderr, "unsupported level value: 0x%x\n", limit);
812 c = &cpuid_data.entries[cpuid_i++];
816 /* Keep reading function 2 until all the input has been received */
820 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
821 KVM_CPUID_FLAG_STATE_READ_NEXT;
822 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
823 times = c->eax & 0xff;
825 for (j = 1; j < times; ++j) {
826 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
827 fprintf(stderr, "cpuid_data is full, no space for "
828 "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
831 c = &cpuid_data.entries[cpuid_i++];
833 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
834 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
842 if (i == 0xd && j == 64) {
846 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
848 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
850 if (i == 4 && c->eax == 0) {
853 if (i == 0xb && !(c->ecx & 0xff00)) {
856 if (i == 0xd && c->eax == 0) {
859 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
860 fprintf(stderr, "cpuid_data is full, no space for "
861 "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
864 c = &cpuid_data.entries[cpuid_i++];
870 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
878 cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
880 has_architectural_pmu_version = eax & 0xff;
881 if (has_architectural_pmu_version > 0) {
882 num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
884 /* Shouldn't be more than 32, since that's the number of bits
885 * available in EBX to tell us _which_ counters are available.
888 if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
889 num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
892 if (has_architectural_pmu_version > 1) {
893 num_architectural_pmu_fixed_counters = edx & 0x1f;
895 if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
896 num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
902 cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
904 for (i = 0x80000000; i <= limit; i++) {
905 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
906 fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
909 c = &cpuid_data.entries[cpuid_i++];
913 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
916 /* Call Centaur's CPUID instructions if they are supported. */
917 if (env->cpuid_xlevel2 > 0) {
918 cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
920 for (i = 0xC0000000; i <= limit; i++) {
921 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
922 fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
925 c = &cpuid_data.entries[cpuid_i++];
929 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
933 cpuid_data.cpuid.nent = cpuid_i;
935 if (((env->cpuid_version >> 8) & 0xF) >= 6
936 && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
937 (CPUID_MCE | CPUID_MCA)
938 && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
939 uint64_t mcg_cap, unsupported_caps;
943 ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
945 fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
949 if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
950 error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
951 (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
955 unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
956 if (unsupported_caps) {
957 if (unsupported_caps & MCG_LMCE_P) {
958 error_report("kvm: LMCE not supported");
961 warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
965 env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
966 ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
968 fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
973 qemu_add_vm_change_state_handler(cpu_update_state, env);
975 c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
977 has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
978 !!(c->ecx & CPUID_EXT_SMX);
981 if (env->mcg_cap & MCG_LMCE_P) {
982 has_msr_mcg_ext_ctl = has_msr_feature_control = true;
985 if (!env->user_tsc_khz) {
986 if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
987 invtsc_mig_blocker == NULL) {
989 error_setg(&invtsc_mig_blocker,
990 "State blocked by non-migratable CPU device"
992 r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
994 error_report_err(local_err);
995 error_free(invtsc_mig_blocker);
999 vmstate_x86_cpu.unmigratable = 1;
1003 if (cpu->vmware_cpuid_freq
1004 /* Guests depend on 0x40000000 to detect this feature, so only expose
1005 * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
1007 && kvm_base == KVM_CPUID_SIGNATURE
1008 /* TSC clock must be stable and known for this feature. */
1009 && tsc_is_stable_and_known(env)) {
1011 c = &cpuid_data.entries[cpuid_i++];
1012 c->function = KVM_CPUID_SIGNATURE | 0x10;
1013 c->eax = env->tsc_khz;
1014 /* LAPIC resolution of 1ns (freq: 1GHz) is hardcoded in KVM's
1015 * APIC_BUS_CYCLE_NS */
1017 c->ecx = c->edx = 0;
1019 c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
1020 c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
1023 cpuid_data.cpuid.nent = cpuid_i;
1025 cpuid_data.cpuid.padding = 0;
1026 r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
1032 env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
1034 cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
1036 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
1037 has_msr_tsc_aux = false;
1043 migrate_del_blocker(invtsc_mig_blocker);
1047 void kvm_arch_reset_vcpu(X86CPU *cpu)
1049 CPUX86State *env = &cpu->env;
1052 if (kvm_irqchip_in_kernel()) {
1053 env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
1054 KVM_MP_STATE_UNINITIALIZED;
1056 env->mp_state = KVM_MP_STATE_RUNNABLE;
1059 if (cpu->hyperv_synic) {
1061 for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
1062 env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
1067 void kvm_arch_do_init_vcpu(X86CPU *cpu)
1069 CPUX86State *env = &cpu->env;
1071 /* APs get directly into wait-for-SIPI state. */
1072 if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
1073 env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
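/* Query the kernel's MSR index list once and cache which optional
 * MSRs are available in the has_msr_* flags defined near the top of
 * this file.
 */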
1077 static int kvm_get_supported_msrs(KVMState *s)
1079 static int kvm_supported_msrs;
1083 if (kvm_supported_msrs == 0) {
1084 struct kvm_msr_list msr_list, *kvm_msr_list;
1086 kvm_supported_msrs = -1;
1088 /* Obtain MSR list from KVM. These are the MSRs that we must
1091 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
1092 if (ret < 0 && ret != -E2BIG) {
1095 /* Old kernel modules had a bug and could write beyond the provided
1096 memory. Allocate at least 1K to be safe. */
1097 kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
1099 sizeof(msr_list.indices[0])));
1101 kvm_msr_list->nmsrs = msr_list.nmsrs;
1102 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
1106 for (i = 0; i < kvm_msr_list->nmsrs; i++) {
1107 switch (kvm_msr_list->indices[i]) {
1109 has_msr_star = true;
1111 case MSR_VM_HSAVE_PA:
1112 has_msr_hsave_pa = true;
1115 has_msr_tsc_aux = true;
1117 case MSR_TSC_ADJUST:
1118 has_msr_tsc_adjust = true;
1120 case MSR_IA32_TSCDEADLINE:
1121 has_msr_tsc_deadline = true;
1123 case MSR_IA32_SMBASE:
1124 has_msr_smbase = true;
1126 case MSR_IA32_MISC_ENABLE:
1127 has_msr_misc_enable = true;
1129 case MSR_IA32_BNDCFGS:
1130 has_msr_bndcfgs = true;
1135 case HV_X64_MSR_CRASH_CTL:
1136 has_msr_hv_crash = true;
1138 case HV_X64_MSR_RESET:
1139 has_msr_hv_reset = true;
1141 case HV_X64_MSR_VP_INDEX:
1142 has_msr_hv_vpindex = true;
1144 case HV_X64_MSR_VP_RUNTIME:
1145 has_msr_hv_runtime = true;
1147 case HV_X64_MSR_SCONTROL:
1148 has_msr_hv_synic = true;
1150 case HV_X64_MSR_STIMER0_CONFIG:
1151 has_msr_hv_stimer = true;
1153 case HV_X64_MSR_TSC_FREQUENCY:
1154 has_msr_hv_frequencies = true;
1160 g_free(kvm_msr_list);
1166 static Notifier smram_machine_done;
1167 static KVMMemoryListener smram_listener;
1168 static AddressSpace smram_address_space;
1169 static MemoryRegion smram_as_root;
1170 static MemoryRegion smram_as_mem;
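/* Build a second address space in which SMRAM overlays normal system
 * memory, and register it with KVM as listener address space 1, which
 * KVM uses for accesses made in SMM.
 */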
1172 static void register_smram_listener(Notifier *n, void *unused)
1174 MemoryRegion *smram =
1175 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
1177 /* Outer container... */
1178 memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
1179 memory_region_set_enabled(&smram_as_root, true);
1181 /* ... with two regions inside: normal system memory with low
1184 memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
1185 get_system_memory(), 0, ~0ull);
1186 memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
1187 memory_region_set_enabled(&smram_as_mem, true);
1190 /* ... SMRAM with higher priority */
1191 memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
1192 memory_region_set_enabled(smram, true);
1195 address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
1196 kvm_memory_listener_register(kvm_state, &smram_listener,
1197 &smram_address_space, 1);
1200 int kvm_arch_init(MachineState *ms, KVMState *s)
1202 uint64_t identity_base = 0xfffbc000;
1203 uint64_t shadow_mem;
1205 struct utsname utsname;
1207 #ifdef KVM_CAP_XSAVE
1208 has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
1212 has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
1215 #ifdef KVM_CAP_PIT_STATE2
1216 has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
1219 ret = kvm_get_supported_msrs(s);
1225 lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
1228 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
1229 * In order to use vm86 mode, an EPT identity map and a TSS are needed.
1230 * Since these must be part of guest physical memory, we need to allocate
1231 * them, both by setting their start addresses in the kernel and by
1232 * creating a corresponding e820 entry. We need 4 pages before the BIOS.
1234 * Older KVM versions may not support setting the identity map base. In
1235 * that case we need to stick with the default, i.e. a 256K maximum BIOS
1238 if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
1239 /* Allows up to 16M BIOSes. */
1240 identity_base = 0xfeffc000;
1242 ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
1248 /* Set TSS base one page after EPT identity map. */
1249 ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
1254 /* Tell fw_cfg to notify the BIOS to reserve the range. */
1255 ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
1257 fprintf(stderr, "e820_add_entry() table is full\n");
1260 qemu_register_reset(kvm_unpoison_all, NULL);
1262 shadow_mem = machine_kvm_shadow_mem(ms);
1263 if (shadow_mem != -1) {
1265 ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
1271 if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
1272 object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE) &&
1273 pc_machine_is_smm_enabled(PC_MACHINE(ms))) {
1274 smram_machine_done.notify = register_smram_listener;
1275 qemu_add_machine_init_done_notifier(&smram_machine_done);
1280 static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
1282 lhs->selector = rhs->selector;
1283 lhs->base = rhs->base;
1284 lhs->limit = rhs->limit;
1296 static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
1298 unsigned flags = rhs->flags;
1299 lhs->selector = rhs->selector;
1300 lhs->base = rhs->base;
1301 lhs->limit = rhs->limit;
1302 lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
1303 lhs->present = (flags & DESC_P_MASK) != 0;
1304 lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
1305 lhs->db = (flags >> DESC_B_SHIFT) & 1;
1306 lhs->s = (flags & DESC_S_MASK) != 0;
1307 lhs->l = (flags >> DESC_L_SHIFT) & 1;
1308 lhs->g = (flags & DESC_G_MASK) != 0;
1309 lhs->avl = (flags & DESC_AVL_MASK) != 0;
1310 lhs->unusable = !lhs->present;
1314 static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
1316 lhs->selector = rhs->selector;
1317 lhs->base = rhs->base;
1318 lhs->limit = rhs->limit;
1319 lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
1320 ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
1321 (rhs->dpl << DESC_DPL_SHIFT) |
1322 (rhs->db << DESC_B_SHIFT) |
1323 (rhs->s * DESC_S_MASK) |
1324 (rhs->l << DESC_L_SHIFT) |
1325 (rhs->g * DESC_G_MASK) |
1326 (rhs->avl * DESC_AVL_MASK);
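/* Copy a single general-purpose register between the KVM and QEMU
 * representations; 'set' selects the direction (QEMU to KVM when
 * non-zero).
 */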
1329 static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
1332 *kvm_reg = *qemu_reg;
1334 *qemu_reg = *kvm_reg;
1338 static int kvm_getput_regs(X86CPU *cpu, int set)
1340 CPUX86State *env = &cpu->env;
1341 struct kvm_regs regs;
1345 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
1351 kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
1352 kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
1353 kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
1354 kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
1355 kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
1356 kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
1357 kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
1358 kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
1359 #ifdef TARGET_X86_64
1360 kvm_getput_reg(&regs.r8, &env->regs[8], set);
1361 kvm_getput_reg(&regs.r9, &env->regs[9], set);
1362 kvm_getput_reg(&regs.r10, &env->regs[10], set);
1363 kvm_getput_reg(&regs.r11, &env->regs[11], set);
1364 kvm_getput_reg(&regs.r12, &env->regs[12], set);
1365 kvm_getput_reg(&regs.r13, &env->regs[13], set);
1366 kvm_getput_reg(&regs.r14, &env->regs[14], set);
1367 kvm_getput_reg(&regs.r15, &env->regs[15], set);
1370 kvm_getput_reg(&regs.rflags, &env->eflags, set);
1371 kvm_getput_reg(&regs.rip, &env->eip, set);
1374 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
1380 static int kvm_put_fpu(X86CPU *cpu)
1382 CPUX86State *env = &cpu->env;
1386 memset(&fpu, 0, sizeof fpu);
1387 fpu.fsw = env->fpus & ~(7 << 11);
1388 fpu.fsw |= (env->fpstt & 7) << 11;
1389 fpu.fcw = env->fpuc;
1390 fpu.last_opcode = env->fpop;
1391 fpu.last_ip = env->fpip;
1392 fpu.last_dp = env->fpdp;
1393 for (i = 0; i < 8; ++i) {
1394 fpu.ftwx |= (!env->fptags[i]) << i;
1396 memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
1397 for (i = 0; i < CPU_NB_REGS; i++) {
1398 stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
1399 stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
1401 fpu.mxcsr = env->mxcsr;
1403 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
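/* The offsets below are in 32-bit words within kvm_xsave.region[];
 * XSAVE_BYTE_OFFSET() converts them to byte offsets, and the
 * ASSERT_OFFSET() checks pin them against QEMU's X86XSaveArea layout
 * at build time.
 */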
1406 #define XSAVE_FCW_FSW 0
1407 #define XSAVE_FTW_FOP 1
1408 #define XSAVE_CWD_RIP 2
1409 #define XSAVE_CWD_RDP 4
1410 #define XSAVE_MXCSR 6
1411 #define XSAVE_ST_SPACE 8
1412 #define XSAVE_XMM_SPACE 40
1413 #define XSAVE_XSTATE_BV 128
1414 #define XSAVE_YMMH_SPACE 144
1415 #define XSAVE_BNDREGS 240
1416 #define XSAVE_BNDCSR 256
1417 #define XSAVE_OPMASK 272
1418 #define XSAVE_ZMM_Hi256 288
1419 #define XSAVE_Hi16_ZMM 416
1420 #define XSAVE_PKRU 672
1422 #define XSAVE_BYTE_OFFSET(word_offset) \
1423 ((word_offset) * sizeof(((struct kvm_xsave *)0)->region[0]))
1425 #define ASSERT_OFFSET(word_offset, field) \
1426 QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
1427 offsetof(X86XSaveArea, field))
1429 ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw);
1430 ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw);
1431 ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip);
1432 ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp);
1433 ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr);
1434 ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs);
1435 ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs);
1436 ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv);
1437 ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state);
1438 ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state);
1439 ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state);
1440 ASSERT_OFFSET(XSAVE_OPMASK, opmask_state);
1441 ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state);
1442 ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state);
1443 ASSERT_OFFSET(XSAVE_PKRU, pkru_state);
1445 static int kvm_put_xsave(X86CPU *cpu)
1447 CPUX86State *env = &cpu->env;
1448 X86XSaveArea *xsave = env->kvm_xsave_buf;
1451 return kvm_put_fpu(cpu);
1453 x86_cpu_xsave_all_areas(cpu, xsave);
1455 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
1458 static int kvm_put_xcrs(X86CPU *cpu)
1460 CPUX86State *env = &cpu->env;
1461 struct kvm_xcrs xcrs = {};
1469 xcrs.xcrs[0].xcr = 0;
1470 xcrs.xcrs[0].value = env->xcr0;
1471 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
1474 static int kvm_put_sregs(X86CPU *cpu)
1476 CPUX86State *env = &cpu->env;
1477 struct kvm_sregs sregs;
1479 memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
1480 if (env->interrupt_injected >= 0) {
1481 sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
1482 (uint64_t)1 << (env->interrupt_injected % 64);
1485 if ((env->eflags & VM_MASK)) {
1486 set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
1487 set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
1488 set_v8086_seg(&sregs.es, &env->segs[R_ES]);
1489 set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
1490 set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
1491 set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
1493 set_seg(&sregs.cs, &env->segs[R_CS]);
1494 set_seg(&sregs.ds, &env->segs[R_DS]);
1495 set_seg(&sregs.es, &env->segs[R_ES]);
1496 set_seg(&sregs.fs, &env->segs[R_FS]);
1497 set_seg(&sregs.gs, &env->segs[R_GS]);
1498 set_seg(&sregs.ss, &env->segs[R_SS]);
1501 set_seg(&sregs.tr, &env->tr);
1502 set_seg(&sregs.ldt, &env->ldt);
1504 sregs.idt.limit = env->idt.limit;
1505 sregs.idt.base = env->idt.base;
1506 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
1507 sregs.gdt.limit = env->gdt.limit;
1508 sregs.gdt.base = env->gdt.base;
1509 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
1511 sregs.cr0 = env->cr[0];
1512 sregs.cr2 = env->cr[2];
1513 sregs.cr3 = env->cr[3];
1514 sregs.cr4 = env->cr[4];
1516 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
1517 sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
1519 sregs.efer = env->efer;
1521 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
1524 static void kvm_msr_buf_reset(X86CPU *cpu)
1526 memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
1529 static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
1531 struct kvm_msrs *msrs = cpu->kvm_msr_buf;
1532 void *limit = ((void *)msrs) + MSR_BUF_SIZE;
1533 struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];
1535 assert((void *)(entry + 1) <= limit);
1537 entry->index = index;
1538 entry->reserved = 0;
1539 entry->data = value;
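/* Convenience wrapper to write a single MSR via KVM_SET_MSRS. */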
1543 static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
1545 kvm_msr_buf_reset(cpu);
1546 kvm_msr_entry_add(cpu, index, value);
1548 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
1551 void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
1555 ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
1559 static int kvm_put_tscdeadline_msr(X86CPU *cpu)
1561 CPUX86State *env = &cpu->env;
1564 if (!has_msr_tsc_deadline) {
1568 ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
1578 * Provide a separate write service for the feature control MSR in order to
1579 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
1580 * before writing any other state because forcibly leaving nested mode
1581 * invalidates the VCPU state.
1583 static int kvm_put_msr_feature_control(X86CPU *cpu)
1587 if (!has_msr_feature_control) {
1591 ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
1592 cpu->env.msr_ia32_feature_control);
1601 static int kvm_put_msrs(X86CPU *cpu, int level)
1603 CPUX86State *env = &cpu->env;
1607 kvm_msr_buf_reset(cpu);
1609 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
1610 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
1611 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
1612 kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
1614 kvm_msr_entry_add(cpu, MSR_STAR, env->star);
1616 if (has_msr_hsave_pa) {
1617 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
1619 if (has_msr_tsc_aux) {
1620 kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
1622 if (has_msr_tsc_adjust) {
1623 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
1625 if (has_msr_misc_enable) {
1626 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
1627 env->msr_ia32_misc_enable);
1629 if (has_msr_smbase) {
1630 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
1632 if (has_msr_bndcfgs) {
1633 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
1636 kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
1638 #ifdef TARGET_X86_64
1639 if (lm_capable_kernel) {
1640 kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
1641 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
1642 kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
1643 kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
1647 * The following MSRs have side effects on the guest or are too heavy
1648 * for normal writeback. Limit them to reset or full state updates.
1650 if (level >= KVM_PUT_RESET_STATE) {
1651 kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
1652 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
1653 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
1654 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
1655 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
1657 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
1658 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
1660 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
1661 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
1663 if (has_architectural_pmu_version > 0) {
1664 if (has_architectural_pmu_version > 1) {
1665 /* Stop the counters. */
1666 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
1667 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
1670 /* Set the counter values. */
1671 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
1672 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
1673 env->msr_fixed_counters[i]);
1675 for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
1676 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
1677 env->msr_gp_counters[i]);
1678 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
1679 env->msr_gp_evtsel[i]);
1681 if (has_architectural_pmu_version > 1) {
1682 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
1683 env->msr_global_status);
1684 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
1685 env->msr_global_ovf_ctrl);
1687 /* Now start the PMU. */
1688 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
1689 env->msr_fixed_ctr_ctrl);
1690 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
1691 env->msr_global_ctrl);
1695 * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
1696 * only sync them to KVM on the first cpu
1698 if (current_cpu == first_cpu) {
1699 if (has_msr_hv_hypercall) {
1700 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
1701 env->msr_hv_guest_os_id);
1702 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
1703 env->msr_hv_hypercall);
1705 if (cpu->hyperv_time) {
1706 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
1710 if (cpu->hyperv_vapic) {
1711 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
1714 if (has_msr_hv_crash) {
1717 for (j = 0; j < HV_CRASH_PARAMS; j++)
1718 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
1719 env->msr_hv_crash_params[j]);
1721 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
1723 if (has_msr_hv_runtime) {
1724 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
1726 if (cpu->hyperv_synic) {
1729 kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);
1731 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
1732 env->msr_hv_synic_control);
1733 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
1734 env->msr_hv_synic_evt_page);
1735 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
1736 env->msr_hv_synic_msg_page);
1738 for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
1739 kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
1740 env->msr_hv_synic_sint[j]);
1743 if (has_msr_hv_stimer) {
1746 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
1747 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
1748 env->msr_hv_stimer_config[j]);
1751 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
1752 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
1753 env->msr_hv_stimer_count[j]);
1756 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
1757 uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);
1759 kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
1760 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
1761 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
1762 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
1763 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
1764 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
1765 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
1766 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
1767 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
1768 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
1769 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
1770 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
1771 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
1772 /* The CPU GPs if we write to a bit above the physical limit of
1773 * the host CPU (and KVM emulates that)
1775 uint64_t mask = env->mtrr_var[i].mask;
1778 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
1779 env->mtrr_var[i].base);
1780 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
1784 /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
1785 * kvm_put_msr_feature_control. */
1790 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
1791 kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
1792 if (has_msr_mcg_ext_ctl) {
1793 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
1795 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
1796 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
1800 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
1805 if (ret < cpu->kvm_msr_buf->nmsrs) {
1806 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
1807 error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
1808 (uint32_t)e->index, (uint64_t)e->data);
1811 assert(ret == cpu->kvm_msr_buf->nmsrs);
1816 static int kvm_get_fpu(X86CPU *cpu)
1818 CPUX86State *env = &cpu->env;
1822 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
1827 env->fpstt = (fpu.fsw >> 11) & 7;
1828 env->fpus = fpu.fsw;
1829 env->fpuc = fpu.fcw;
1830 env->fpop = fpu.last_opcode;
1831 env->fpip = fpu.last_ip;
1832 env->fpdp = fpu.last_dp;
1833 for (i = 0; i < 8; ++i) {
1834 env->fptags[i] = !((fpu.ftwx >> i) & 1);
1836 memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
1837 for (i = 0; i < CPU_NB_REGS; i++) {
1838 env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
1839 env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
1841 env->mxcsr = fpu.mxcsr;
1846 static int kvm_get_xsave(X86CPU *cpu)
1848 CPUX86State *env = &cpu->env;
1849 X86XSaveArea *xsave = env->kvm_xsave_buf;
1853 return kvm_get_fpu(cpu);
1856 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
1860 x86_cpu_xrstor_all_areas(cpu, xsave);
1865 static int kvm_get_xcrs(X86CPU *cpu)
1867 CPUX86State *env = &cpu->env;
1869 struct kvm_xcrs xcrs;
1875 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
1880 for (i = 0; i < xcrs.nr_xcrs; i++) {
1881 /* Only support xcr0 now */
1882 if (xcrs.xcrs[i].xcr == 0) {
1883 env->xcr0 = xcrs.xcrs[i].value;
1890 static int kvm_get_sregs(X86CPU *cpu)
1892 CPUX86State *env = &cpu->env;
1893 struct kvm_sregs sregs;
1896 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
1901 /* There can only be one pending IRQ set in the bitmap at a time, so try
1902 to find it and save its number instead (-1 for none). */
1903 env->interrupt_injected = -1;
1904 for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
1905 if (sregs.interrupt_bitmap[i]) {
1906 bit = ctz64(sregs.interrupt_bitmap[i]);
1907 env->interrupt_injected = i * 64 + bit;
1912 get_seg(&env->segs[R_CS], &sregs.cs);
1913 get_seg(&env->segs[R_DS], &sregs.ds);
1914 get_seg(&env->segs[R_ES], &sregs.es);
1915 get_seg(&env->segs[R_FS], &sregs.fs);
1916 get_seg(&env->segs[R_GS], &sregs.gs);
1917 get_seg(&env->segs[R_SS], &sregs.ss);
1919 get_seg(&env->tr, &sregs.tr);
1920 get_seg(&env->ldt, &sregs.ldt);
1922 env->idt.limit = sregs.idt.limit;
1923 env->idt.base = sregs.idt.base;
1924 env->gdt.limit = sregs.gdt.limit;
1925 env->gdt.base = sregs.gdt.base;
1927 env->cr[0] = sregs.cr0;
1928 env->cr[2] = sregs.cr2;
1929 env->cr[3] = sregs.cr3;
1930 env->cr[4] = sregs.cr4;
1932 env->efer = sregs.efer;
1934 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
1935 x86_update_hflags(env);
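/* Read back every MSR known to be in use: the buffer is first filled
 * with the indices to query (data 0), then KVM_GET_MSRS overwrites the
 * data fields in place and the switch below scatters the results into
 * CPUX86State.
 */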
1940 static int kvm_get_msrs(X86CPU *cpu)
1942 CPUX86State *env = &cpu->env;
1943 struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
1945 uint64_t mtrr_top_bits;
1947 kvm_msr_buf_reset(cpu);
1949 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
1950 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
1951 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
1952 kvm_msr_entry_add(cpu, MSR_PAT, 0);
1954 kvm_msr_entry_add(cpu, MSR_STAR, 0);
1956 if (has_msr_hsave_pa) {
1957 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
1959 if (has_msr_tsc_aux) {
1960 kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
1962 if (has_msr_tsc_adjust) {
1963 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
1965 if (has_msr_tsc_deadline) {
1966 kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
1968 if (has_msr_misc_enable) {
1969 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
1971 if (has_msr_smbase) {
1972 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
1974 if (has_msr_feature_control) {
1975 kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
1977 if (has_msr_bndcfgs) {
1978 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
1981 kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
1985 if (!env->tsc_valid) {
1986 kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
1987 env->tsc_valid = !runstate_is_running();
1990 #ifdef TARGET_X86_64
1991 if (lm_capable_kernel) {
1992 kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
1993 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
1994 kvm_msr_entry_add(cpu, MSR_FMASK, 0);
1995 kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
1998 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
1999 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
2000 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
2001 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
2003 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
2004 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
2006 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
2007 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
2009 if (has_architectural_pmu_version > 0) {
2010 if (has_architectural_pmu_version > 1) {
2011 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
2012 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
2013 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
2014 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
2016 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
2017 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
2019 for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
2020 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
2021 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
2026 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
2027 kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
2028 if (has_msr_mcg_ext_ctl) {
2029 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
2031 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
2032 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
2036 if (has_msr_hv_hypercall) {
2037 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
2038 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
2040 if (cpu->hyperv_vapic) {
2041 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
2043 if (cpu->hyperv_time) {
2044 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
2046 if (has_msr_hv_crash) {
2049 for (j = 0; j < HV_CRASH_PARAMS; j++) {
2050 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
2053 if (has_msr_hv_runtime) {
2054 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
2056 if (cpu->hyperv_synic) {
2059 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
2060 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
2061 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
2062 for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
2063 kvm_msr_entry_add(cpu, msr, 0);
2066 if (has_msr_hv_stimer) {
2069 for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
2071 kvm_msr_entry_add(cpu, msr, 0);
2074 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
2075 kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
2076 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
2077 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
2078 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
2079 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
2080 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
2081 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
2082 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
2083 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
2084 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
2085 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
2086 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
2087 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
2088 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
2089 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
2093 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
2098 if (ret < cpu->kvm_msr_buf->nmsrs) {
2099 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
2100 error_report("error: failed to get MSR 0x%" PRIx32,
2101 (uint32_t)e->index);
2104 assert(ret == cpu->kvm_msr_buf->nmsrs);
2106 * MTRR masks: Each mask consists of 5 parts
2107 * a 10..0: must be zero
2108 * b 11: valid bit
2109 * c n-1..12: actual mask bits
2110 * d 51..n: reserved, must be zero
2111 * e 63..52: reserved, must be zero
2113 * 'n' is the number of physical bits supported by the CPU and is
2114 * apparently always <= 52. We know our 'n' but don't know what
2115 * the destination's 'n' is; it might be smaller, in which case
2116 * it masks (c) on loading. It might be larger, in which case
2117 * we fill 'd' so that d..c is consistent irrespective of the 'n'
2118 * we're migrating to.
2121 if (cpu->fill_mtrr_mask) {
2122 QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
2123 assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
2124 mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
2129 for (i = 0; i < ret; i++) {
2130 uint32_t index = msrs[i].index;
2132 case MSR_IA32_SYSENTER_CS:
2133 env->sysenter_cs = msrs[i].data;
2135 case MSR_IA32_SYSENTER_ESP:
2136 env->sysenter_esp = msrs[i].data;
2138 case MSR_IA32_SYSENTER_EIP:
2139 env->sysenter_eip = msrs[i].data;
2142 env->pat = msrs[i].data;
2145 env->star = msrs[i].data;
2147 #ifdef TARGET_X86_64
2149 env->cstar = msrs[i].data;
2151 case MSR_KERNELGSBASE:
2152 env->kernelgsbase = msrs[i].data;
2155 env->fmask = msrs[i].data;
2158 env->lstar = msrs[i].data;
2162 env->tsc = msrs[i].data;
2165 env->tsc_aux = msrs[i].data;
2167 case MSR_TSC_ADJUST:
2168 env->tsc_adjust = msrs[i].data;
2170 case MSR_IA32_TSCDEADLINE:
2171 env->tsc_deadline = msrs[i].data;
2173 case MSR_VM_HSAVE_PA:
2174 env->vm_hsave = msrs[i].data;
2176 case MSR_KVM_SYSTEM_TIME:
2177 env->system_time_msr = msrs[i].data;
2179 case MSR_KVM_WALL_CLOCK:
2180 env->wall_clock_msr = msrs[i].data;
2182 case MSR_MCG_STATUS:
2183 env->mcg_status = msrs[i].data;
2186 env->mcg_ctl = msrs[i].data;
2188 case MSR_MCG_EXT_CTL:
2189 env->mcg_ext_ctl = msrs[i].data;
2191 case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
        case MSR_IA32_SMBASE:
            env->smbase = msrs[i].data;
            break;
        case MSR_IA32_FEATURE_CONTROL:
            env->msr_ia32_feature_control = msrs[i].data;
            break;
        case MSR_IA32_BNDCFGS:
            env->msr_bndcfgs = msrs[i].data;
            break;
        case MSR_IA32_XSS:
            env->xss = msrs[i].data;
            break;
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        case MSR_KVM_PV_EOI_EN:
            env->pv_eoi_en_msr = msrs[i].data;
            break;
        case MSR_KVM_STEAL_TIME:
            env->steal_time_msr = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
            env->msr_fixed_ctr_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_CTRL:
            env->msr_global_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
            env->msr_global_status = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
            env->msr_global_ovf_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
            break;
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
            break;
        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
            break;
        case HV_X64_MSR_HYPERCALL:
            env->msr_hv_hypercall = msrs[i].data;
            break;
        case HV_X64_MSR_GUEST_OS_ID:
            env->msr_hv_guest_os_id = msrs[i].data;
            break;
        case HV_X64_MSR_APIC_ASSIST_PAGE:
            env->msr_hv_vapic = msrs[i].data;
            break;
        case HV_X64_MSR_REFERENCE_TSC:
            env->msr_hv_tsc = msrs[i].data;
            break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
            env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
            break;
        case HV_X64_MSR_VP_RUNTIME:
            env->msr_hv_runtime = msrs[i].data;
            break;
        case HV_X64_MSR_SCONTROL:
            env->msr_hv_synic_control = msrs[i].data;
            break;
        case HV_X64_MSR_SIEFP:
            env->msr_hv_synic_evt_page = msrs[i].data;
            break;
        case HV_X64_MSR_SIMP:
            env->msr_hv_synic_msg_page = msrs[i].data;
            break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG:
            env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
                                msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT:
            env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
                                msrs[i].data;
            break;
        case MSR_MTRRdefType:
            env->mtrr_deftype = msrs[i].data;
            break;
        case MSR_MTRRfix64K_00000:
            env->mtrr_fixed[0] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_80000:
            env->mtrr_fixed[1] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_A0000:
            env->mtrr_fixed[2] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C0000:
            env->mtrr_fixed[3] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C8000:
            env->mtrr_fixed[4] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D0000:
            env->mtrr_fixed[5] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D8000:
            env->mtrr_fixed[6] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E0000:
            env->mtrr_fixed[7] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E8000:
            env->mtrr_fixed[8] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F0000:
            env->mtrr_fixed[9] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F8000:
            env->mtrr_fixed[10] = msrs[i].data;
            break;
        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
            if (index & 1) {
                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
                                                               mtrr_top_bits;
            } else {
                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
            }
            break;
        }
    }

    return 0;
}

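/* Synchronize the vCPU multiprocessing state (runnable, halted,
 * INIT/SIPI pending) with the in-kernel value.
 */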
static int kvm_put_mp_state(X86CPU *cpu)
{
    struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}

static int kvm_get_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;
    int ret;

    if (apic && kvm_irqchip_in_kernel()) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
        if (ret < 0) {
            return ret;
        }

        kvm_get_apic_state(apic, &kapic);
    }
    return 0;
}

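/* Mirror QEMU's pending exception/interrupt/NMI/SMI state into the
 * kernel via KVM_SET_VCPU_EVENTS. On SMM-capable kernels with an
 * in-kernel irqchip, this also transfers the latched INIT and pending
 * SMI out of cs->interrupt_request.
 */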
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events = {};

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;
    events.exception.pad = 0;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
    events.nmi.pad = 0;

    events.sipi_vector = env->sipi_vector;
    events.flags = 0;

    if (has_msr_smbase) {
        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
        if (kvm_irqchip_in_kernel()) {
            /* As soon as these are moved to the kernel, remove them
             * from cs->interrupt_request.
             */
            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
        } else {
            /* Keep these in cs->interrupt_request. */
            events.smi.pending = 0;
            events.smi.latched_init = 0;
        }
        /* Stop SMI delivery on old machine types to avoid a reboot
         * on an inward migration of an old VM.
         */
        if (!cpu->kvm_no_smi_migration) {
            events.flags |= KVM_VCPUEVENT_VALID_SMM;
        }
    }

    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
        if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
            events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
        }
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}

static int kvm_get_vcpu_events(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
        if (events.smi.smm) {
            env->hflags |= HF_SMM_MASK;
        } else {
            env->hflags &= ~HF_SMM_MASK;
        }
        if (events.smi.pending) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        }
        if (events.smi.smm_inside_nmi) {
            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
        } else {
            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
        }
        if (events.smi.latched_init) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        }
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}

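/* Compatibility handling for kernels that lack KVM_CAP_VCPU_EVENTS:
 * pending #DB/#BP exceptions cannot be transferred with
 * KVM_SET_VCPU_EVENTS, so they are reinjected through
 * KVM_SET_GUEST_DEBUG instead.
 */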
static int kvm_guest_debug_workarounds(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest: on kernels without SET_VCPU_EVENTS it has to
     * be reinjected via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
        ret = kvm_update_guest_debug(cs, reinject_trap);
    }
    return ret;
}

static int kvm_put_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}

static int kvm_get_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}

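/* Write the register state to KVM. "level" controls how much is
 * transferred: KVM_PUT_RUNTIME_STATE covers registers that change
 * during execution, KVM_PUT_RESET_STATE additionally covers state
 * modified by reset, and KVM_PUT_FULL_STATE transfers everything,
 * including the TSC rate.
 */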
int kvm_arch_put_registers(CPUState *cpu, int level)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (level == KVM_PUT_FULL_STATE) {
        /* We don't check for kvm_arch_set_tsc_khz() errors here,
         * because TSC frequency mismatch shouldn't abort migration,
         * unless the user explicitly asked for a more strict TSC
         * setting (e.g. using an explicit "tsc-freq" option).
         */
        kvm_arch_set_tsc_khz(cpu);
    }

    ret = kvm_getput_regs(x86_cpu, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_put_tscdeadline_msr(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    int ret;

    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));

    ret = kvm_get_vcpu_events(cpu);
    if (ret < 0) {
        goto out;
    }
    /*
     * KVM_GET_MPSTATE can modify CS and RIP, call it before
     * KVM_GET_REGS and KVM_GET_SREGS.
     */
    ret = kvm_get_mp_state(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_getput_regs(cpu, 0);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xsave(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xcrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_sregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_msrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_apic(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_debugregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
 out:
    cpu_sync_bndcs_hflags(&cpu->env);
    return ret;
}

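/* Runs on the vCPU thread right before entering the guest: pending NMIs
 * and SMIs are injected here; with a userspace irqchip this also injects
 * PIC interrupts and maintains the interrupt-window and TPR fields of
 * kvm_run.
 */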
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret;

    /* Inject NMI */
    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected NMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                        strerror(-ret));
            }
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected SMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
                        strerror(-ret));
            }
        }
    }

    if (!kvm_pic_in_kernel()) {
        qemu_mutex_lock_iothread();
    }

    /* Force the VCPU out of its inner loop to process any INIT requests
     * or (for userspace APIC, but it is cheap to combine the checks here)
     * pending TPR access reports.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    if (!kvm_pic_in_kernel()) {
        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit. This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts.
         */
        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);

        qemu_mutex_unlock_iothread();
    }
}

MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (run->flags & KVM_RUN_X86_SMM) {
        env->hflags |= HF_SMM_MASK;
    } else {
        env->hflags &= ~HF_SMM_MASK;
    }
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }

    /* We need to protect the apic state against concurrent accesses from
     * different threads in case the userspace irqchip is used. */
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_lock_iothread();
    }
    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_unlock_iothread();
    }
    return cpu_get_mem_attrs(env);
}

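/* Handle interrupt requests that do not require entering the guest:
 * machine checks, INIT (outside SMM) and, with a userspace APIC,
 * poll/SIPI/TPR reports. A nonzero return keeps the vCPU halted.
 */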
int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(cs);

        if (env->exception_injected == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            cs->exit_request = 1;
            return 0;
        }
        env->exception_injected = EXCP12_MCHK;
        env->has_error_code = 0;

        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return cs->halted;
}

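/* Emulate HLT for the userspace irqchip case: keep the vCPU halted
 * unless an unmasked external interrupt or an NMI is pending.
 */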
static int kvm_handle_halt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}

static int kvm_handle_tpr_access(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
                                                           : TPR_ACCESS_READ);
    return 1;
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

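/* Bookkeeping for up to four hardware breakpoints/watchpoints, matching
 * the four x86 debug address registers DR0-DR3. The len/type fields use
 * the GDB encodings and are translated into DR7 bits in
 * kvm_arch_update_guest_debug() below.
 */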
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

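/* Scratch watchpoint plus the translation of KVM_EXIT_DEBUG exits into
 * EXCP_DEBUG for the gdbstub. In DR6, bit 14 (BS) flags a single-step
 * trap and bits 0-3 report which of DR0-DR3 triggered. Debug exceptions
 * that QEMU itself did not set up are passed back to the guest.
 */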
static CPUWatchpoint hw_watchpoint;

static int kvm_handle_debug(X86CPU *cpu,
                            struct kvm_debug_exit_arch *arch_info)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cs->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cs);
        assert(env->exception_injected == -1);

        /* pass to guest */
        env->exception_injected = arch_info->exception;
        env->has_error_code = 0;
    }

    return ret;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}

static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}

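/* VMX basic exit reason 33 ("VM-entry failure due to invalid guest
 * state"), with the VM-entry-failure bit (bit 31) set as reported in
 * the VMCS exit reason field.
 */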
#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_halt(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        qemu_mutex_lock_iothread();
        ret = kvm_handle_tpr_access(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure is most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode,\n"
                    "which is not supported on less recent Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_debug(cpu, &run->debug.arch);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_HYPERV:
        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
        break;
    case KVM_EXIT_IOAPIC_EOI:
        ioapic_eoi_broadcast(run->eoi.vector);
        ret = 0;
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        /* If the kernel can't do IRQ routing, the interrupt source
         * override 0->2 cannot be set up as required by HPET, so we
         * have to disable it.
         */
        no_hpet = 1;
    }
    /* We know at this point that we're using the in-kernel
     * irqchip, so we can use irqfds, and on x86 we know
     * we can use msi via irqfd and GSI routing.
     */
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;

    if (kvm_irqchip_is_split()) {
        int i;

        /* If the ioapic is in QEMU and the lapics are in KVM, reserve
         * MSI routes for signaling interrupts to the local apics.
         */
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
            if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
                error_report("Could not enable split IRQ mode.");
                exit(1);
            }
        }
    }
}

int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    int ret;

    if (machine_kernel_irqchip_split(ms)) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
        if (ret) {
            error_report("Could not enable split irqchip mode: %s",
                         strerror(-ret));
            exit(1);
        } else {
            DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
            kvm_split_irqchip = true;
            return 1;
        }
    } else {
        return 0;
    }
}

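/* Note: the KVM_ASSIGN_* ioctls used below belong to the legacy device
 * assignment interface, which has been superseded by VFIO and removed
 * from recent kernels; on such kernels these ioctls simply fail.
 */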
/* Classic KVM device assignment interface. Will remain x86 only. */
int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
                          uint32_t flags, uint32_t *dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .segnr = dev_addr->domain,
        .busnr = dev_addr->bus,
        .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
        .flags = flags,
    };
    int ret;

    dev_data.assigned_dev_id =
        (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;

    ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
    if (ret < 0) {
        return ret;
    }

    *dev_id = dev_data.assigned_dev_id;

    return 0;
}

int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
}

static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
                                   uint32_t irq_type, uint32_t guest_irq)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .guest_irq = guest_irq,
        .flags = irq_type,
    };

    if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
        return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
    } else {
        return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
    }
}

int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
                           uint32_t guest_irq)
{
    uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);

    return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
}

int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
        .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
}

static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
                                     uint32_t type)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .flags = type,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
}

int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
}

int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
                                   KVM_DEV_IRQ_GUEST_MSI, virq);
}

int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
                                     KVM_DEV_IRQ_HOST_MSI);
}

bool kvm_device_msix_supported(KVMState *s)
{
    /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
     * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
}

int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
                                 uint32_t nr_vectors)
{
    struct kvm_assigned_msix_nr msix_nr = {
        .assigned_dev_id = dev_id,
        .entry_nr = nr_vectors,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
}

int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
                               int virq)
{
    struct kvm_assigned_msix_entry msix_entry = {
        .assigned_dev_id = dev_id,
        .gsi = virq,
        .entry = vector,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
}

int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
                                   KVM_DEV_IRQ_GUEST_MSIX, 0);
}

int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
                                     KVM_DEV_IRQ_HOST_MSIX);
}

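/* When a vIOMMU with interrupt remapping is present, MSI routes
 * programmed into KVM carry guest-visible (pre-remapping) addresses and
 * must be translated into host-visible ones before they are installed.
 */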
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    X86IOMMUState *iommu = x86_iommu_get_default();

    if (iommu) {
        int ret;
        MSIMessage src, dst;
        X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu);

        src.address = route->u.msi.address_hi;
        src.address <<= VTD_MSI_ADDR_HI_SHIFT;
        src.address |= route->u.msi.address_lo;
        src.data = route->u.msi.data;

        ret = class->int_remap(iommu, &src, &dst, dev ?
                               pci_requester_id(dev) :
                               X86_IOMMU_SID_INVALID);
        if (ret) {
            trace_kvm_x86_fixup_msi_error(route->gsi);
            return 1;
        }

        route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
        route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
        route->u.msi.data = dst.data;
    }

    return 0;
}

typedef struct MSIRouteEntry MSIRouteEntry;

struct MSIRouteEntry {
    PCIDevice *dev;             /* Device pointer */
    int vector;                 /* MSI/MSIX vector index */
    int virq;                   /* Virtual IRQ index */
    QLIST_ENTRY(MSIRouteEntry) list;
};

/* List of used GSI routes */
static QLIST_HEAD(, MSIRouteEntry) msi_route_list =
    QLIST_HEAD_INITIALIZER(msi_route_list);

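/* Invalidation callback, registered with the vIOMMU's interrupt entry
 * cache (IEC): re-read each tracked device's MSI message and refresh
 * the corresponding KVM route.
 */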
static void kvm_update_msi_routes_all(void *private, bool global,
                                      uint32_t index, uint32_t mask)
{
    int cnt = 0;
    MSIRouteEntry *entry;
    MSIMessage msg;
    PCIDevice *dev;

    /* TODO: explicit route update */
    QLIST_FOREACH(entry, &msi_route_list, list) {
        cnt++;
        dev = entry->dev;
        if (!msix_enabled(dev) && !msi_enabled(dev)) {
            continue;
        }
        msg = pci_get_msi_message(dev, entry->vector);
        kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
    }
    kvm_irqchip_commit_routes(kvm_state);
    trace_kvm_x86_update_msi_routes(cnt);
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    static bool notify_list_inited = false;
    MSIRouteEntry *entry;

    if (!dev) {
        /* These are (possibly) IOAPIC routes only used for split
         * kernel irqchip mode, while what we are housekeeping are
         * PCI devices only.
         */
        return 0;
    }

    entry = g_new0(MSIRouteEntry, 1);
    entry->dev = dev;
    entry->vector = vector;
    entry->virq = route->gsi;
    QLIST_INSERT_HEAD(&msi_route_list, entry, list);

    trace_kvm_x86_add_msi_route(route->gsi);

    if (!notify_list_inited) {
        /* The first time we add a route, register ourselves in the
         * IOMMU's IEC notify list if there is an IOMMU.
         */
        X86IOMMUState *iommu = x86_iommu_get_default();
        if (iommu) {
            x86_iommu_iec_register_notifier(iommu,
                                            kvm_update_msi_routes_all,
                                            NULL);
        }
        notify_list_inited = true;
    }
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    MSIRouteEntry *entry, *next;

    QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
        if (entry->virq == virq) {
            trace_kvm_x86_remove_msi_route(virq);
            QLIST_REMOVE(entry, list);
            g_free(entry);
            break;
        }
    }
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)