4 * Copyright (C) 2006-2008 Qumranet Technologies
5 * Copyright IBM, Corp. 2008
10 * This work is licensed under the terms of the GNU GPL, version 2 or later.
11 * See the COPYING file in the top-level directory.
15 #include <sys/types.h>
16 #include <sys/ioctl.h>
18 #include <sys/utsname.h>
20 #include <linux/kvm.h>
21 #include <linux/kvm_para.h>
23 #include "qemu-common.h"
29 #include "host-utils.h"
38 #ifdef DEBUG_KVM
39 #define DPRINTF(fmt, ...) \
40 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
41 #else
42 #define DPRINTF(fmt, ...) \
43 do { } while (0)
44 #endif
46 #define MSR_KVM_WALL_CLOCK 0x11
47 #define MSR_KVM_SYSTEM_TIME 0x12
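/* These are the original kvmclock MSR indices. Later kernels also
 * accept replacements in the 0x4b564d00 range, but this code uses
 * the legacy numbers. */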
50 #define BUS_MCEERR_AR 4
53 #define BUS_MCEERR_AO 5
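/* siginfo si_code values for SIGBUS machine-check faults, defined
 * locally (presumably for older headers that lack them): AR means
 * "action required", i.e. the faulting access cannot be retried;
 * AO means "action optional", i.e. a page was poisoned
 * asynchronously. */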
56 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
57 KVM_CAP_INFO(SET_TSS_ADDR),
58 KVM_CAP_INFO(EXT_CPUID),
59 KVM_CAP_INFO(MP_STATE),
63 static bool has_msr_star;
64 static bool has_msr_hsave_pa;
65 static bool has_msr_tsc_deadline;
66 static bool has_msr_async_pf_en;
67 static bool has_msr_pv_eoi_en;
68 static bool has_msr_misc_enable;
69 static int lm_capable_kernel;
71 bool kvm_allows_irq0_override(void)
73 return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
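/* With a userspace irqchip the IRQ0 override (re-routing the PIT
 * away from IRQ0, e.g. for HPET setups) always works; with the
 * in-kernel irqchip it additionally requires GSI routing support
 * in KVM. */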
76 static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
78 struct kvm_cpuid2 *cpuid;
81 size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
82 cpuid = (struct kvm_cpuid2 *)g_malloc0(size);
84 r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
85 if (r == 0 && cpuid->nent >= max) {
93 fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
101 struct kvm_para_features {
104 } para_features[] = {
105 { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
106 { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
107 { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
108 { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
109 { -1, -1 }
110 };
112 static int get_para_features(KVMState *s)
116 for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
117 if (kvm_check_extension(s, para_features[i].cap)) {
118 features |= (1 << para_features[i].feature);
126 uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
127 uint32_t index, int reg)
129 struct kvm_cpuid2 *cpuid;
132 uint32_t cpuid_1_edx;
133 int has_kvm_features = 0;
136 while ((cpuid = try_get_cpuid(s, max)) == NULL) {
140 for (i = 0; i < cpuid->nent; ++i) {
141 if (cpuid->entries[i].function == function &&
142 cpuid->entries[i].index == index) {
143 if (cpuid->entries[i].function == KVM_CPUID_FEATURES) {
144 has_kvm_features = 1;
148 ret = cpuid->entries[i].eax;
151 ret = cpuid->entries[i].ebx;
154 ret = cpuid->entries[i].ecx;
157 ret = cpuid->entries[i].edx;
160 /* KVM before 2.6.30 misreports the following features */
161 ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
164 /* On Intel, kvm returns cpuid according to the Intel spec,
165 * so add missing bits according to the AMD spec:
166 */
167 cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
168 ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
178 /* fallback for older kernels */
179 if (!has_kvm_features && (function == KVM_CPUID_FEATURES)) {
180 ret = get_para_features(s);
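/* The typical use of kvm_arch_get_supported_cpuid() is to mask a
 * guest feature word down to what the host KVM can actually
 * virtualize, e.g. (as done in kvm_arch_init_vcpu() below):
 *
 *   env->cpuid_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
 */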
186 typedef struct HWPoisonPage {
188 QLIST_ENTRY(HWPoisonPage) list;
191 static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
192 QLIST_HEAD_INITIALIZER(hwpoison_page_list);
194 static void kvm_unpoison_all(void *param)
196 HWPoisonPage *page, *next_page;
198 QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
199 QLIST_REMOVE(page, list);
200 qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
205 static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
209 QLIST_FOREACH(page, &hwpoison_page_list, list) {
210 if (page->ram_addr == ram_addr) {
214 page = g_malloc(sizeof(HWPoisonPage));
215 page->ram_addr = ram_addr;
216 QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
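/* Pages that the host reported as poisoned are collected on this
 * list so that kvm_unpoison_all(), registered as a reset handler
 * in kvm_arch_init(), can replace them with fresh memory when the
 * VM is reset. */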
219 static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
224 r = kvm_check_extension(s, KVM_CAP_MCE);
227 return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
232 static void kvm_mce_inject(CPUX86State *env, hwaddr paddr, int code)
234 uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
235 MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
236 uint64_t mcg_status = MCG_STATUS_MCIP;
238 if (code == BUS_MCEERR_AR) {
239 status |= MCI_STATUS_AR | 0x134;
240 mcg_status |= MCG_STATUS_EIPV;
243 mcg_status |= MCG_STATUS_RIPV;
245 cpu_x86_inject_mce(NULL, env, 9, status, mcg_status, paddr,
246 (MCM_ADDR_PHYS << 6) | 0xc,
247 cpu_x86_support_mca_broadcast(env) ?
248 MCE_INJECT_BROADCAST : 0);
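/* The synthesized MCi_STATUS describes a valid, enabled,
 * uncorrected error with MISC/ADDR valid; 0x134 should be the MCA
 * "data load" error code. BUS_MCEERR_AR additionally sets the
 * action-required bit and claims a valid EIP, while AO errors
 * report a restartable RIP instead. Bank 9 is used, and the event
 * is broadcast to all VCPUs when the CPU model supports MCA
 * broadcast. */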
251 static void hardware_memory_error(void)
253 fprintf(stderr, "Hardware memory error!\n");
257 int kvm_arch_on_sigbus_vcpu(CPUX86State *env, int code, void *addr)
262 if ((env->mcg_cap & MCG_SER_P) && addr
263 && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
264 if (qemu_ram_addr_from_host(addr, &ram_addr) ||
265 !kvm_physical_memory_addr_from_host(env->kvm_state, addr, &paddr)) {
266 fprintf(stderr, "Hardware memory error for memory used by "
267 "QEMU itself instead of guest system!\n");
268 /* Hope we are lucky for AO MCE */
269 if (code == BUS_MCEERR_AO) {
272 hardware_memory_error();
275 kvm_hwpoison_page_add(ram_addr);
276 kvm_mce_inject(env, paddr, code);
278 if (code == BUS_MCEERR_AO) {
280 } else if (code == BUS_MCEERR_AR) {
281 hardware_memory_error();
289 int kvm_arch_on_sigbus(int code, void *addr)
291 if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
295 /* Hope we are lucky for AO MCE */
296 if (qemu_ram_addr_from_host(addr, &ram_addr) ||
297 !kvm_physical_memory_addr_from_host(first_cpu->kvm_state, addr,
299 fprintf(stderr, "Hardware memory error for memory used by "
300 "QEMU itself instead of guest system!: %p\n", addr);
303 kvm_hwpoison_page_add(ram_addr);
304 kvm_mce_inject(first_cpu, paddr, code);
306 if (code == BUS_MCEERR_AO) {
308 } else if (code == BUS_MCEERR_AR) {
309 hardware_memory_error();
317 static int kvm_inject_mce_oldstyle(CPUX86State *env)
319 if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
320 unsigned int bank, bank_num = env->mcg_cap & 0xff;
321 struct kvm_x86_mce mce;
323 env->exception_injected = -1;
326 * There must be at least one bank in use if an MCE is pending.
327 * Find it and use its values for the event injection.
328 */
329 for (bank = 0; bank < bank_num; bank++) {
330 if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
334 assert(bank < bank_num);
337 mce.status = env->mce_banks[bank * 4 + 1];
338 mce.mcg_status = env->mcg_status;
339 mce.addr = env->mce_banks[bank * 4 + 2];
340 mce.misc = env->mce_banks[bank * 4 + 3];
342 return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, &mce);
347 static void cpu_update_state(void *opaque, int running, RunState state)
349 CPUX86State *env = opaque;
352 env->tsc_valid = false;
356 int kvm_arch_init_vcpu(CPUX86State *env)
358 struct {
359 struct kvm_cpuid2 cpuid;
360 struct kvm_cpuid_entry2 entries[100];
361 } QEMU_PACKED cpuid_data;
362 KVMState *s = env->kvm_state;
363 uint32_t limit, i, j, cpuid_i;
364 uint32_t unused;
365 struct kvm_cpuid_entry2 *c;
366 uint32_t signature[3];
369 env->cpuid_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
371 i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
372 j = env->cpuid_ext_features & CPUID_EXT_TSC_DEADLINE_TIMER;
373 env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX);
374 env->cpuid_ext_features |= i;
375 if (j && kvm_irqchip_in_kernel() &&
376 kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
377 env->cpuid_ext_features |= CPUID_EXT_TSC_DEADLINE_TIMER;
380 env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(s, 0x80000001,
382 env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(s, 0x80000001,
384 env->cpuid_svm_features &= kvm_arch_get_supported_cpuid(s, 0x8000000A,
389 /* Paravirtualization CPUIDs */
390 c = &cpuid_data.entries[cpuid_i++];
391 memset(c, 0, sizeof(*c));
392 c->function = KVM_CPUID_SIGNATURE;
393 if (!hyperv_enabled()) {
394 memcpy(signature, "KVMKVMKVM\0\0\0", 12);
397 memcpy(signature, "Microsoft Hv", 12);
398 c->eax = HYPERV_CPUID_MIN;
400 c->ebx = signature[0];
401 c->ecx = signature[1];
402 c->edx = signature[2];
404 c = &cpuid_data.entries[cpuid_i++];
405 memset(c, 0, sizeof(*c));
406 c->function = KVM_CPUID_FEATURES;
407 c->eax = env->cpuid_kvm_features &
408 kvm_arch_get_supported_cpuid(s, KVM_CPUID_FEATURES, 0, R_EAX);
410 if (hyperv_enabled()) {
411 memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
412 c->eax = signature[0];
414 c = &cpuid_data.entries[cpuid_i++];
415 memset(c, 0, sizeof(*c));
416 c->function = HYPERV_CPUID_VERSION;
420 c = &cpuid_data.entries[cpuid_i++];
421 memset(c, 0, sizeof(*c));
422 c->function = HYPERV_CPUID_FEATURES;
423 if (hyperv_relaxed_timing_enabled()) {
424 c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
426 if (hyperv_vapic_recommended()) {
427 c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
428 c->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
431 c = &cpuid_data.entries[cpuid_i++];
432 memset(c, 0, sizeof(*c));
433 c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
434 if (hyperv_relaxed_timing_enabled()) {
435 c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
437 if (hyperv_vapic_recommended()) {
438 c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
440 c->ebx = hyperv_get_spinlock_retries();
442 c = &cpuid_data.entries[cpuid_i++];
443 memset(c, 0, sizeof(*c));
444 c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
448 c = &cpuid_data.entries[cpuid_i++];
449 memset(c, 0, sizeof(*c));
450 c->function = KVM_CPUID_SIGNATURE_NEXT;
451 memcpy(signature, "KVMKVMKVM\0\0\0", 12);
453 c->ebx = signature[0];
454 c->ecx = signature[1];
455 c->edx = signature[2];
458 has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);
460 has_msr_pv_eoi_en = c->eax & (1 << KVM_FEATURE_PV_EOI);
462 cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
464 for (i = 0; i <= limit; i++) {
465 c = &cpuid_data.entries[cpuid_i++];
469 /* Keep reading function 2 until all of its data has been received */
473 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
474 KVM_CPUID_FLAG_STATE_READ_NEXT;
475 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
476 times = c->eax & 0xff;
478 for (j = 1; j < times; ++j) {
479 c = &cpuid_data.entries[cpuid_i++];
481 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
482 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
490 if (i == 0xd && j == 64) {
494 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
496 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
498 if (i == 4 && c->eax == 0) {
501 if (i == 0xb && !(c->ecx & 0xff00)) {
504 if (i == 0xd && c->eax == 0) {
507 c = &cpuid_data.entries[cpuid_i++];
513 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
517 cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
519 for (i = 0x80000000; i <= limit; i++) {
520 c = &cpuid_data.entries[cpuid_i++];
524 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
527 /* Call Centaur's CPUID instruction if it is supported. */
528 if (env->cpuid_xlevel2 > 0) {
529 env->cpuid_ext4_features &=
530 kvm_arch_get_supported_cpuid(s, 0xC0000001, 0, R_EDX);
531 cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
533 for (i = 0xC0000000; i <= limit; i++) {
534 c = &cpuid_data.entries[cpuid_i++];
538 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
542 cpuid_data.cpuid.nent = cpuid_i;
544 if (((env->cpuid_version >> 8)&0xF) >= 6
545 && (env->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)
546 && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
551 ret = kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks);
553 fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
557 if (banks > MCE_BANKS_DEF) {
558 banks = MCE_BANKS_DEF;
560 mcg_cap &= MCE_CAP_DEF;
562 ret = kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, &mcg_cap);
564 fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
568 env->mcg_cap = mcg_cap;
571 qemu_add_vm_change_state_handler(cpu_update_state, env);
573 cpuid_data.cpuid.padding = 0;
574 r = kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
579 r = kvm_check_extension(env->kvm_state, KVM_CAP_TSC_CONTROL);
580 if (r && env->tsc_khz) {
581 r = kvm_vcpu_ioctl(env, KVM_SET_TSC_KHZ, env->tsc_khz);
583 fprintf(stderr, "KVM_SET_TSC_KHZ failed\n");
588 if (kvm_has_xsave()) {
589 env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
595 void kvm_arch_reset_vcpu(CPUX86State *env)
597 X86CPU *cpu = x86_env_get_cpu(env);
599 env->exception_injected = -1;
600 env->interrupt_injected = -1;
602 if (kvm_irqchip_in_kernel()) {
603 env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
604 KVM_MP_STATE_UNINITIALIZED;
606 env->mp_state = KVM_MP_STATE_RUNNABLE;
610 static int kvm_get_supported_msrs(KVMState *s)
612 static int kvm_supported_msrs;
616 if (kvm_supported_msrs == 0) {
617 struct kvm_msr_list msr_list, *kvm_msr_list;
619 kvm_supported_msrs = -1;
621 /* Obtain MSR list from KVM. These are the MSRs that we must
622 * save/restore */
624 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
625 if (ret < 0 && ret != -E2BIG) {
628 /* Old kernel modules had a bug and could write beyond the provided
629 memory. Allocate at least a safe amount of 1K. */
630 kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
632 sizeof(msr_list.indices[0])));
634 kvm_msr_list->nmsrs = msr_list.nmsrs;
635 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
639 for (i = 0; i < kvm_msr_list->nmsrs; i++) {
640 if (kvm_msr_list->indices[i] == MSR_STAR) {
644 if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
645 has_msr_hsave_pa = true;
648 if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
649 has_msr_tsc_deadline = true;
652 if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
653 has_msr_misc_enable = true;
659 g_free(kvm_msr_list);
665 int kvm_arch_init(KVMState *s)
667 QemuOptsList *list = qemu_find_opts("machine");
668 uint64_t identity_base = 0xfffbc000;
671 struct utsname utsname;
673 ret = kvm_get_supported_msrs(s);
679 lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
682 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
683 * In order to use vm86 mode, an EPT identity map and a TSS are needed.
684 * Since these must be part of guest physical memory, we need to allocate
685 * them, both by setting their start addresses in the kernel and by
686 * creating a corresponding e820 entry. We need 4 pages before the BIOS.
688 * Older KVM versions may not support setting the identity map base. In
689 * that case we need to stick with the default, i.e. a 256K maximum BIOS
690 * size.
691 */
692 if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
693 /* Allows up to 16M BIOSes. */
694 identity_base = 0xfeffc000;
696 ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
702 /* Set TSS base one page after EPT identity map. */
703 ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
708 /* Tell fw_cfg to notify the BIOS to reserve the range. */
709 ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
711 fprintf(stderr, "e820_add_entry() table is full\n");
714 qemu_register_reset(kvm_unpoison_all, NULL);
716 if (!QTAILQ_EMPTY(&list->head)) {
717 shadow_mem = qemu_opt_get_size(QTAILQ_FIRST(&list->head),
718 "kvm_shadow_mem", -1);
719 if (shadow_mem != -1) {
721 ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
730 static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
732 lhs->selector = rhs->selector;
733 lhs->base = rhs->base;
734 lhs->limit = rhs->limit;
746 static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
748 unsigned flags = rhs->flags;
749 lhs->selector = rhs->selector;
750 lhs->base = rhs->base;
751 lhs->limit = rhs->limit;
752 lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
753 lhs->present = (flags & DESC_P_MASK) != 0;
754 lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
755 lhs->db = (flags >> DESC_B_SHIFT) & 1;
756 lhs->s = (flags & DESC_S_MASK) != 0;
757 lhs->l = (flags >> DESC_L_SHIFT) & 1;
758 lhs->g = (flags & DESC_G_MASK) != 0;
759 lhs->avl = (flags & DESC_AVL_MASK) != 0;
764 static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
766 lhs->selector = rhs->selector;
767 lhs->base = rhs->base;
768 lhs->limit = rhs->limit;
769 lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
770 (rhs->present * DESC_P_MASK) |
771 (rhs->dpl << DESC_DPL_SHIFT) |
772 (rhs->db << DESC_B_SHIFT) |
773 (rhs->s * DESC_S_MASK) |
774 (rhs->l << DESC_L_SHIFT) |
775 (rhs->g * DESC_G_MASK) |
776 (rhs->avl * DESC_AVL_MASK);
779 static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
782 *kvm_reg = *qemu_reg;
784 *qemu_reg = *kvm_reg;
788 static int kvm_getput_regs(CPUX86State *env, int set)
790 struct kvm_regs regs;
794 ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
800 kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
801 kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
802 kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
803 kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
804 kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
805 kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
806 kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
807 kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
809 kvm_getput_reg(&regs.r8, &env->regs[8], set);
810 kvm_getput_reg(&regs.r9, &env->regs[9], set);
811 kvm_getput_reg(&regs.r10, &env->regs[10], set);
812 kvm_getput_reg(&regs.r11, &env->regs[11], set);
813 kvm_getput_reg(&regs.r12, &env->regs[12], set);
814 kvm_getput_reg(&regs.r13, &env->regs[13], set);
815 kvm_getput_reg(&regs.r14, &env->regs[14], set);
816 kvm_getput_reg(&regs.r15, &env->regs[15], set);
819 kvm_getput_reg(&regs.rflags, &env->eflags, set);
820 kvm_getput_reg(&regs.rip, &env->eip, set);
823 ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
829 static int kvm_put_fpu(CPUX86State *env)
834 memset(&fpu, 0, sizeof fpu);
835 fpu.fsw = env->fpus & ~(7 << 11);
836 fpu.fsw |= (env->fpstt & 7) << 11;
838 fpu.last_opcode = env->fpop;
839 fpu.last_ip = env->fpip;
840 fpu.last_dp = env->fpdp;
841 for (i = 0; i < 8; ++i) {
842 fpu.ftwx |= (!env->fptags[i]) << i;
844 memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
845 memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
846 fpu.mxcsr = env->mxcsr;
848 return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
851 #define XSAVE_FCW_FSW 0
852 #define XSAVE_FTW_FOP 1
853 #define XSAVE_CWD_RIP 2
854 #define XSAVE_CWD_RDP 4
855 #define XSAVE_MXCSR 6
856 #define XSAVE_ST_SPACE 8
857 #define XSAVE_XMM_SPACE 40
858 #define XSAVE_XSTATE_BV 128
859 #define XSAVE_YMMH_SPACE 144
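/* These offsets index the uint32_t region[] of struct kvm_xsave and
 * are therefore a quarter of the byte offsets in the hardware XSAVE
 * image: e.g. XSAVE_XSTATE_BV (128) is byte offset 512, the start
 * of the XSAVE header, and XSAVE_YMMH_SPACE (144) is byte 576,
 * where the AVX high halves live in the extended region. */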
861 static int kvm_put_xsave(CPUX86State *env)
863 struct kvm_xsave* xsave = env->kvm_xsave_buf;
864 uint16_t cwd, swd, twd;
867 if (!kvm_has_xsave()) {
868 return kvm_put_fpu(env);
871 memset(xsave, 0, sizeof(struct kvm_xsave));
873 swd = env->fpus & ~(7 << 11);
874 swd |= (env->fpstt & 7) << 11;
876 for (i = 0; i < 8; ++i) {
877 twd |= (!env->fptags[i]) << i;
879 xsave->region[XSAVE_FCW_FSW] = (uint32_t)(swd << 16) + cwd;
880 xsave->region[XSAVE_FTW_FOP] = (uint32_t)(env->fpop << 16) + twd;
881 memcpy(&xsave->region[XSAVE_CWD_RIP], &env->fpip, sizeof(env->fpip));
882 memcpy(&xsave->region[XSAVE_CWD_RDP], &env->fpdp, sizeof(env->fpdp));
883 memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
885 memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
886 sizeof env->xmm_regs);
887 xsave->region[XSAVE_MXCSR] = env->mxcsr;
888 *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
889 memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
890 sizeof env->ymmh_regs);
891 r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
895 static int kvm_put_xcrs(CPUX86State *env)
897 struct kvm_xcrs xcrs;
899 if (!kvm_has_xcrs()) {
905 xcrs.xcrs[0].xcr = 0;
906 xcrs.xcrs[0].value = env->xcr0;
907 return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
910 static int kvm_put_sregs(CPUX86State *env)
912 struct kvm_sregs sregs;
914 memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
915 if (env->interrupt_injected >= 0) {
916 sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
917 (uint64_t)1 << (env->interrupt_injected % 64);
920 if ((env->eflags & VM_MASK)) {
921 set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
922 set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
923 set_v8086_seg(&sregs.es, &env->segs[R_ES]);
924 set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
925 set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
926 set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
928 set_seg(&sregs.cs, &env->segs[R_CS]);
929 set_seg(&sregs.ds, &env->segs[R_DS]);
930 set_seg(&sregs.es, &env->segs[R_ES]);
931 set_seg(&sregs.fs, &env->segs[R_FS]);
932 set_seg(&sregs.gs, &env->segs[R_GS]);
933 set_seg(&sregs.ss, &env->segs[R_SS]);
936 set_seg(&sregs.tr, &env->tr);
937 set_seg(&sregs.ldt, &env->ldt);
939 sregs.idt.limit = env->idt.limit;
940 sregs.idt.base = env->idt.base;
941 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
942 sregs.gdt.limit = env->gdt.limit;
943 sregs.gdt.base = env->gdt.base;
944 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
946 sregs.cr0 = env->cr[0];
947 sregs.cr2 = env->cr[2];
948 sregs.cr3 = env->cr[3];
949 sregs.cr4 = env->cr[4];
951 sregs.cr8 = cpu_get_apic_tpr(env->apic_state);
952 sregs.apic_base = cpu_get_apic_base(env->apic_state);
954 sregs.efer = env->efer;
956 return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
959 static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
960 uint32_t index, uint64_t value)
962 entry->index = index;
966 static int kvm_put_msrs(CPUX86State *env, int level)
968 struct {
969 struct kvm_msrs info;
970 struct kvm_msr_entry entries[100];
971 } msr_data;
972 struct kvm_msr_entry *msrs = msr_data.entries;
975 kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
976 kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
977 kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
978 kvm_msr_entry_set(&msrs[n++], MSR_PAT, env->pat);
980 kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
982 if (has_msr_hsave_pa) {
983 kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
985 if (has_msr_tsc_deadline) {
986 kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
988 if (has_msr_misc_enable) {
989 kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
990 env->msr_ia32_misc_enable);
993 if (lm_capable_kernel) {
994 kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
995 kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
996 kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
997 kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
1000 if (level == KVM_PUT_FULL_STATE) {
1002 * KVM is yet unable to synchronize TSC values of multiple VCPUs on
1003 * writeback. Until this is fixed, we only write the offset to SMP
1004 * guests after migration, desynchronizing the VCPUs, but avoiding
1005 * huge jump-backs that would occur without any writeback at all.
1006 */
1007 if (smp_cpus == 1 || env->tsc != 0) {
1008 kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
1012 * The following paravirtual MSRs have side effects on the guest or are
1013 * too heavy for normal writeback. Limit them to reset or full state
1014 * updates.
1015 */
1016 if (level >= KVM_PUT_RESET_STATE) {
1017 kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
1018 env->system_time_msr);
1019 kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
1020 if (has_msr_async_pf_en) {
1021 kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
1022 env->async_pf_en_msr);
1024 if (has_msr_pv_eoi_en) {
1025 kvm_msr_entry_set(&msrs[n++], MSR_KVM_PV_EOI_EN,
1026 env->pv_eoi_en_msr);
1028 if (hyperv_hypercall_available()) {
1029 kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_GUEST_OS_ID, 0);
1030 kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_HYPERCALL, 0);
1032 if (hyperv_vapic_recommended()) {
1033 kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE, 0);
1039 kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
1040 kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
1041 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
1042 kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
1046 msr_data.info.nmsrs = n;
1048 return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
1053 static int kvm_get_fpu(CPUX86State *env)
1058 ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
1063 env->fpstt = (fpu.fsw >> 11) & 7;
1064 env->fpus = fpu.fsw;
1065 env->fpuc = fpu.fcw;
1066 env->fpop = fpu.last_opcode;
1067 env->fpip = fpu.last_ip;
1068 env->fpdp = fpu.last_dp;
1069 for (i = 0; i < 8; ++i) {
1070 env->fptags[i] = !((fpu.ftwx >> i) & 1);
1072 memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
1073 memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
1074 env->mxcsr = fpu.mxcsr;
1079 static int kvm_get_xsave(CPUX86State *env)
1081 struct kvm_xsave* xsave = env->kvm_xsave_buf;
1083 uint16_t cwd, swd, twd;
1085 if (!kvm_has_xsave()) {
1086 return kvm_get_fpu(env);
1089 ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
1094 cwd = (uint16_t)xsave->region[XSAVE_FCW_FSW];
1095 swd = (uint16_t)(xsave->region[XSAVE_FCW_FSW] >> 16);
1096 twd = (uint16_t)xsave->region[XSAVE_FTW_FOP];
1097 env->fpop = (uint16_t)(xsave->region[XSAVE_FTW_FOP] >> 16);
1098 env->fpstt = (swd >> 11) & 7;
1101 for (i = 0; i < 8; ++i) {
1102 env->fptags[i] = !((twd >> i) & 1);
1104 memcpy(&env->fpip, &xsave->region[XSAVE_CWD_RIP], sizeof(env->fpip));
1105 memcpy(&env->fpdp, &xsave->region[XSAVE_CWD_RDP], sizeof(env->fpdp));
1106 env->mxcsr = xsave->region[XSAVE_MXCSR];
1107 memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
1108 sizeof env->fpregs);
1109 memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
1110 sizeof env->xmm_regs);
1111 env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
1112 memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
1113 sizeof env->ymmh_regs);
1117 static int kvm_get_xcrs(CPUX86State *env)
1120 struct kvm_xcrs xcrs;
1122 if (!kvm_has_xcrs()) {
1126 ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
1131 for (i = 0; i < xcrs.nr_xcrs; i++) {
1132 /* Only XCR0 is supported for now */
1133 if (xcrs.xcrs[0].xcr == 0) {
1134 env->xcr0 = xcrs.xcrs[0].value;
1141 static int kvm_get_sregs(CPUX86State *env)
1143 struct kvm_sregs sregs;
1147 ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
1152 /* There can only be one pending IRQ set in the bitmap at a time, so try
1153 to find it and save its number instead (-1 for none). */
1154 env->interrupt_injected = -1;
1155 for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
1156 if (sregs.interrupt_bitmap[i]) {
1157 bit = ctz64(sregs.interrupt_bitmap[i]);
1158 env->interrupt_injected = i * 64 + bit;
1163 get_seg(&env->segs[R_CS], &sregs.cs);
1164 get_seg(&env->segs[R_DS], &sregs.ds);
1165 get_seg(&env->segs[R_ES], &sregs.es);
1166 get_seg(&env->segs[R_FS], &sregs.fs);
1167 get_seg(&env->segs[R_GS], &sregs.gs);
1168 get_seg(&env->segs[R_SS], &sregs.ss);
1170 get_seg(&env->tr, &sregs.tr);
1171 get_seg(&env->ldt, &sregs.ldt);
1173 env->idt.limit = sregs.idt.limit;
1174 env->idt.base = sregs.idt.base;
1175 env->gdt.limit = sregs.gdt.limit;
1176 env->gdt.base = sregs.gdt.base;
1178 env->cr[0] = sregs.cr0;
1179 env->cr[2] = sregs.cr2;
1180 env->cr[3] = sregs.cr3;
1181 env->cr[4] = sregs.cr4;
1183 env->efer = sregs.efer;
1185 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
1187 #define HFLAG_COPY_MASK \
1188 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
1189 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
1190 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
1191 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
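/* Recompute the hflags that QEMU caches for fast mode checks (CPL,
 * CS/SS size, long mode, ...) from the segment and control register
 * state just read; HFLAG_COPY_MASK preserves the bits that are not
 * derived here. */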
1193 hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
1194 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
1195 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
1196 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
1197 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
1198 hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
1199 (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);
1201 if (env->efer & MSR_EFER_LMA) {
1202 hflags |= HF_LMA_MASK;
1205 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
1206 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
1208 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
1209 (DESC_B_SHIFT - HF_CS32_SHIFT);
1210 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
1211 (DESC_B_SHIFT - HF_SS32_SHIFT);
1212 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
1213 !(hflags & HF_CS32_MASK)) {
1214 hflags |= HF_ADDSEG_MASK;
1216 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
1217 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
1220 env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
1225 static int kvm_get_msrs(CPUX86State *env)
1227 struct {
1228 struct kvm_msrs info;
1229 struct kvm_msr_entry entries[100];
1230 } msr_data;
1231 struct kvm_msr_entry *msrs = msr_data.entries;
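/* Only the index fields are filled in here; KVM_GET_MSRS returns
 * how many MSRs it could read and fills in their data fields, which
 * the loop further down copies back into the CPU state. */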
1235 msrs[n++].index = MSR_IA32_SYSENTER_CS;
1236 msrs[n++].index = MSR_IA32_SYSENTER_ESP;
1237 msrs[n++].index = MSR_IA32_SYSENTER_EIP;
1238 msrs[n++].index = MSR_PAT;
1240 msrs[n++].index = MSR_STAR;
1242 if (has_msr_hsave_pa) {
1243 msrs[n++].index = MSR_VM_HSAVE_PA;
1245 if (has_msr_tsc_deadline) {
1246 msrs[n++].index = MSR_IA32_TSCDEADLINE;
1248 if (has_msr_misc_enable) {
1249 msrs[n++].index = MSR_IA32_MISC_ENABLE;
1252 if (!env->tsc_valid) {
1253 msrs[n++].index = MSR_IA32_TSC;
1254 env->tsc_valid = !runstate_is_running();
1257 #ifdef TARGET_X86_64
1258 if (lm_capable_kernel) {
1259 msrs[n++].index = MSR_CSTAR;
1260 msrs[n++].index = MSR_KERNELGSBASE;
1261 msrs[n++].index = MSR_FMASK;
1262 msrs[n++].index = MSR_LSTAR;
1265 msrs[n++].index = MSR_KVM_SYSTEM_TIME;
1266 msrs[n++].index = MSR_KVM_WALL_CLOCK;
1267 if (has_msr_async_pf_en) {
1268 msrs[n++].index = MSR_KVM_ASYNC_PF_EN;
1270 if (has_msr_pv_eoi_en) {
1271 msrs[n++].index = MSR_KVM_PV_EOI_EN;
1275 msrs[n++].index = MSR_MCG_STATUS;
1276 msrs[n++].index = MSR_MCG_CTL;
1277 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
1278 msrs[n++].index = MSR_MC0_CTL + i;
1282 msr_data.info.nmsrs = n;
1283 ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
1288 for (i = 0; i < ret; i++) {
1289 switch (msrs[i].index) {
1290 case MSR_IA32_SYSENTER_CS:
1291 env->sysenter_cs = msrs[i].data;
1293 case MSR_IA32_SYSENTER_ESP:
1294 env->sysenter_esp = msrs[i].data;
1296 case MSR_IA32_SYSENTER_EIP:
1297 env->sysenter_eip = msrs[i].data;
1300 env->pat = msrs[i].data;
1303 env->star = msrs[i].data;
1305 #ifdef TARGET_X86_64
1307 env->cstar = msrs[i].data;
1309 case MSR_KERNELGSBASE:
1310 env->kernelgsbase = msrs[i].data;
1313 env->fmask = msrs[i].data;
1316 env->lstar = msrs[i].data;
1320 env->tsc = msrs[i].data;
1322 case MSR_IA32_TSCDEADLINE:
1323 env->tsc_deadline = msrs[i].data;
1325 case MSR_VM_HSAVE_PA:
1326 env->vm_hsave = msrs[i].data;
1328 case MSR_KVM_SYSTEM_TIME:
1329 env->system_time_msr = msrs[i].data;
1331 case MSR_KVM_WALL_CLOCK:
1332 env->wall_clock_msr = msrs[i].data;
1334 case MSR_MCG_STATUS:
1335 env->mcg_status = msrs[i].data;
1338 env->mcg_ctl = msrs[i].data;
1340 case MSR_IA32_MISC_ENABLE:
1341 env->msr_ia32_misc_enable = msrs[i].data;
1344 if (msrs[i].index >= MSR_MC0_CTL &&
1345 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
1346 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
1349 case MSR_KVM_ASYNC_PF_EN:
1350 env->async_pf_en_msr = msrs[i].data;
1352 case MSR_KVM_PV_EOI_EN:
1353 env->pv_eoi_en_msr = msrs[i].data;
1361 static int kvm_put_mp_state(CPUX86State *env)
1363 struct kvm_mp_state mp_state = { .mp_state = env->mp_state };
1365 return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
1368 static int kvm_get_mp_state(CPUX86State *env)
1370 struct kvm_mp_state mp_state;
1373 ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
1377 env->mp_state = mp_state.mp_state;
1378 if (kvm_irqchip_in_kernel()) {
1379 env->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
1384 static int kvm_get_apic(CPUX86State *env)
1386 DeviceState *apic = env->apic_state;
1387 struct kvm_lapic_state kapic;
1390 if (apic && kvm_irqchip_in_kernel()) {
1391 ret = kvm_vcpu_ioctl(env, KVM_GET_LAPIC, &kapic);
1396 kvm_get_apic_state(apic, &kapic);
1401 static int kvm_put_apic(CPUX86State *env)
1403 DeviceState *apic = env->apic_state;
1404 struct kvm_lapic_state kapic;
1406 if (apic && kvm_irqchip_in_kernel()) {
1407 kvm_put_apic_state(apic, &kapic);
1409 return kvm_vcpu_ioctl(env, KVM_SET_LAPIC, &kapic);
1414 static int kvm_put_vcpu_events(CPUX86State *env, int level)
1416 struct kvm_vcpu_events events;
1418 if (!kvm_has_vcpu_events()) {
1422 events.exception.injected = (env->exception_injected >= 0);
1423 events.exception.nr = env->exception_injected;
1424 events.exception.has_error_code = env->has_error_code;
1425 events.exception.error_code = env->error_code;
1426 events.exception.pad = 0;
1428 events.interrupt.injected = (env->interrupt_injected >= 0);
1429 events.interrupt.nr = env->interrupt_injected;
1430 events.interrupt.soft = env->soft_interrupt;
1432 events.nmi.injected = env->nmi_injected;
1433 events.nmi.pending = env->nmi_pending;
1434 events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
1437 events.sipi_vector = env->sipi_vector;
1440 if (level >= KVM_PUT_RESET_STATE) {
1441 events.flags |=
1442 KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
1445 return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
1448 static int kvm_get_vcpu_events(CPUX86State *env)
1450 struct kvm_vcpu_events events;
1453 if (!kvm_has_vcpu_events()) {
1457 ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
1461 env->exception_injected =
1462 events.exception.injected ? events.exception.nr : -1;
1463 env->has_error_code = events.exception.has_error_code;
1464 env->error_code = events.exception.error_code;
1466 env->interrupt_injected =
1467 events.interrupt.injected ? events.interrupt.nr : -1;
1468 env->soft_interrupt = events.interrupt.soft;
1470 env->nmi_injected = events.nmi.injected;
1471 env->nmi_pending = events.nmi.pending;
1472 if (events.nmi.masked) {
1473 env->hflags2 |= HF2_NMI_MASK;
1475 env->hflags2 &= ~HF2_NMI_MASK;
1478 env->sipi_vector = events.sipi_vector;
1483 static int kvm_guest_debug_workarounds(CPUX86State *env)
1486 unsigned long reinject_trap = 0;
1488 if (!kvm_has_vcpu_events()) {
1489 if (env->exception_injected == 1) {
1490 reinject_trap = KVM_GUESTDBG_INJECT_DB;
1491 } else if (env->exception_injected == 3) {
1492 reinject_trap = KVM_GUESTDBG_INJECT_BP;
1494 env->exception_injected = -1;
1498 * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
1499 * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
1500 * by updating the debug state once again if single-stepping is on.
1501 * Another reason to call kvm_update_guest_debug here is a pending debug
1502 * trap raise by the guest. On kernels without SET_VCPU_EVENTS we have to
1503 * reinject them via SET_GUEST_DEBUG.
1504 */
1505 if (reinject_trap ||
1506 (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
1507 ret = kvm_update_guest_debug(env, reinject_trap);
1512 static int kvm_put_debugregs(CPUX86State *env)
1514 struct kvm_debugregs dbgregs;
1517 if (!kvm_has_debugregs()) {
1521 for (i = 0; i < 4; i++) {
1522 dbgregs.db[i] = env->dr[i];
1524 dbgregs.dr6 = env->dr[6];
1525 dbgregs.dr7 = env->dr[7];
1528 return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
1531 static int kvm_get_debugregs(CPUX86State *env)
1533 struct kvm_debugregs dbgregs;
1536 if (!kvm_has_debugregs()) {
1540 ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
1544 for (i = 0; i < 4; i++) {
1545 env->dr[i] = dbgregs.db[i];
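/* DR4/DR5 are legacy aliases of DR6/DR7 (when CR4.DE is clear),
 * hence the double assignments below. */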
1547 env->dr[4] = env->dr[6] = dbgregs.dr6;
1548 env->dr[5] = env->dr[7] = dbgregs.dr7;
1553 int kvm_arch_put_registers(CPUX86State *env, int level)
1557 assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));
1559 ret = kvm_getput_regs(env, 1);
1563 ret = kvm_put_xsave(env);
1567 ret = kvm_put_xcrs(env);
1571 ret = kvm_put_sregs(env);
1575 /* must be before kvm_put_msrs */
1576 ret = kvm_inject_mce_oldstyle(env);
1580 ret = kvm_put_msrs(env, level);
1584 if (level >= KVM_PUT_RESET_STATE) {
1585 ret = kvm_put_mp_state(env);
1589 ret = kvm_put_apic(env);
1594 ret = kvm_put_vcpu_events(env, level);
1598 ret = kvm_put_debugregs(env);
1603 ret = kvm_guest_debug_workarounds(env);
1610 int kvm_arch_get_registers(CPUX86State *env)
1614 assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));
1616 ret = kvm_getput_regs(env, 0);
1620 ret = kvm_get_xsave(env);
1624 ret = kvm_get_xcrs(env);
1628 ret = kvm_get_sregs(env);
1632 ret = kvm_get_msrs(env);
1636 ret = kvm_get_mp_state(env);
1640 ret = kvm_get_apic(env);
1644 ret = kvm_get_vcpu_events(env);
1648 ret = kvm_get_debugregs(env);
1655 void kvm_arch_pre_run(CPUX86State *env, struct kvm_run *run)
1660 if (env->interrupt_request & CPU_INTERRUPT_NMI) {
1661 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
1662 DPRINTF("injected NMI\n");
1663 ret = kvm_vcpu_ioctl(env, KVM_NMI);
1665 fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
1670 if (!kvm_irqchip_in_kernel()) {
1671 /* Force the VCPU out of its inner loop to process any INIT requests
1672 * or pending TPR access reports. */
1673 if (env->interrupt_request &
1674 (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
1675 env->exit_request = 1;
1678 /* Try to inject an interrupt if the guest can accept it */
1679 if (run->ready_for_interrupt_injection &&
1680 (env->interrupt_request & CPU_INTERRUPT_HARD) &&
1681 (env->eflags & IF_MASK)) {
1684 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
1685 irq = cpu_get_pic_interrupt(env);
1687 struct kvm_interrupt intr;
1690 DPRINTF("injected interrupt %d\n", irq);
1691 ret = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
1694 "KVM: injection failed, interrupt lost (%s)\n",
1700 /* If we have an interrupt but the guest is not ready to receive an
1701 * interrupt, request an interrupt window exit. This will
1702 * cause a return to userspace as soon as the guest is ready to
1703 * receive interrupts. */
1704 if ((env->interrupt_request & CPU_INTERRUPT_HARD)) {
1705 run->request_interrupt_window = 1;
1707 run->request_interrupt_window = 0;
1710 DPRINTF("setting tpr\n");
1711 run->cr8 = cpu_get_apic_tpr(env->apic_state);
1715 void kvm_arch_post_run(CPUX86State *env, struct kvm_run *run)
1717 if (run->if_flag) {
1718 env->eflags |= IF_MASK;
1719 } else {
1720 env->eflags &= ~IF_MASK;
1721 }
1722 cpu_set_apic_tpr(env->apic_state, run->cr8);
1723 cpu_set_apic_base(env->apic_state, run->apic_base);
1726 int kvm_arch_process_async_events(CPUX86State *env)
1728 X86CPU *cpu = x86_env_get_cpu(env);
1730 if (env->interrupt_request & CPU_INTERRUPT_MCE) {
1731 /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
1732 assert(env->mcg_cap);
1734 env->interrupt_request &= ~CPU_INTERRUPT_MCE;
1736 kvm_cpu_synchronize_state(env);
1738 if (env->exception_injected == EXCP08_DBLE) {
1739 /* this means triple fault */
1740 qemu_system_reset_request();
1741 env->exit_request = 1;
1744 env->exception_injected = EXCP12_MCHK;
1745 env->has_error_code = 0;
1748 if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
1749 env->mp_state = KVM_MP_STATE_RUNNABLE;
1753 if (kvm_irqchip_in_kernel()) {
1757 if (env->interrupt_request & CPU_INTERRUPT_POLL) {
1758 env->interrupt_request &= ~CPU_INTERRUPT_POLL;
1759 apic_poll_irq(env->apic_state);
1761 if (((env->interrupt_request & CPU_INTERRUPT_HARD) &&
1762 (env->eflags & IF_MASK)) ||
1763 (env->interrupt_request & CPU_INTERRUPT_NMI)) {
1766 if (env->interrupt_request & CPU_INTERRUPT_INIT) {
1767 kvm_cpu_synchronize_state(env);
1770 if (env->interrupt_request & CPU_INTERRUPT_SIPI) {
1771 kvm_cpu_synchronize_state(env);
1774 if (env->interrupt_request & CPU_INTERRUPT_TPR) {
1775 env->interrupt_request &= ~CPU_INTERRUPT_TPR;
1776 kvm_cpu_synchronize_state(env);
1777 apic_handle_tpr_access_report(env->apic_state, env->eip,
1778 env->tpr_access_type);
1784 static int kvm_handle_halt(CPUX86State *env)
1786 if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
1787 (env->eflags & IF_MASK)) &&
1788 !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
1796 static int kvm_handle_tpr_access(CPUX86State *env)
1798 struct kvm_run *run = env->kvm_run;
1800 apic_handle_tpr_access_report(env->apic_state, run->tpr_access.rip,
1801 run->tpr_access.is_write ? TPR_ACCESS_WRITE
1806 int kvm_arch_insert_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
1808 static const uint8_t int3 = 0xcc;
1810 if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
1811 cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1)) {
1817 int kvm_arch_remove_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
1821 if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
1822 cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
1834 static int nb_hw_breakpoint;
1836 static int find_hw_breakpoint(target_ulong addr, int len, int type)
1840 for (n = 0; n < nb_hw_breakpoint; n++) {
1841 if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
1842 (hw_breakpoint[n].len == len || len == -1)) {
1849 int kvm_arch_insert_hw_breakpoint(target_ulong addr,
1850 target_ulong len, int type)
1853 case GDB_BREAKPOINT_HW:
1856 case GDB_WATCHPOINT_WRITE:
1857 case GDB_WATCHPOINT_ACCESS:
1864 if (addr & (len - 1)) {
1876 if (nb_hw_breakpoint == 4) {
1879 if (find_hw_breakpoint(addr, len, type) >= 0) {
1882 hw_breakpoint[nb_hw_breakpoint].addr = addr;
1883 hw_breakpoint[nb_hw_breakpoint].len = len;
1884 hw_breakpoint[nb_hw_breakpoint].type = type;
1890 int kvm_arch_remove_hw_breakpoint(target_ulong addr,
1891 target_ulong len, int type)
1895 n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
1900 hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
1905 void kvm_arch_remove_all_hw_breakpoints(void)
1907 nb_hw_breakpoint = 0;
1910 static CPUWatchpoint hw_watchpoint;
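/* Decode a KVM_EXIT_DEBUG exit. For #DB (exception 1), DR6 bit 14
 * is the single-step trap, while bits 0-3 identify which hardware
 * breakpoint slot fired; the R/W field in DR7 then tells execute,
 * write, and access watchpoints apart. Debug events we didn't cause
 * are reinjected into the guest. */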
1912 static int kvm_handle_debug(struct kvm_debug_exit_arch *arch_info)
1917 if (arch_info->exception == 1) {
1918 if (arch_info->dr6 & (1 << 14)) {
1919 if (cpu_single_env->singlestep_enabled) {
1923 for (n = 0; n < 4; n++) {
1924 if (arch_info->dr6 & (1 << n)) {
1925 switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
1931 cpu_single_env->watchpoint_hit = &hw_watchpoint;
1932 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
1933 hw_watchpoint.flags = BP_MEM_WRITE;
1937 cpu_single_env->watchpoint_hit = &hw_watchpoint;
1938 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
1939 hw_watchpoint.flags = BP_MEM_ACCESS;
1945 } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc)) {
1949 cpu_synchronize_state(cpu_single_env);
1950 assert(cpu_single_env->exception_injected == -1);
1953 cpu_single_env->exception_injected = arch_info->exception;
1954 cpu_single_env->has_error_code = 0;
1960 void kvm_arch_update_guest_debug(CPUX86State *env, struct kvm_guest_debug *dbg)
1962 const uint8_t type_code[] = {
1963 [GDB_BREAKPOINT_HW] = 0x0,
1964 [GDB_WATCHPOINT_WRITE] = 0x1,
1965 [GDB_WATCHPOINT_ACCESS] = 0x3
1967 const uint8_t len_code[] = {
1968 [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
1972 if (kvm_sw_breakpoints_active(env)) {
1973 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
1975 if (nb_hw_breakpoint > 0) {
1976 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
1977 dbg->arch.debugreg[7] = 0x0600;
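/* DR7 starts from 0x0600 (apparently the GE flag plus DR7's
 * always-one bit 10); each slot n then gets its enable bit
 * (2 << (n * 2)) plus a 2-bit type field at bit 16 + n*4 and a
 * 2-bit length field at bit 18 + n*4, encoded via type_code[] and
 * len_code[] above. */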
1978 for (n = 0; n < nb_hw_breakpoint; n++) {
1979 dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
1980 dbg->arch.debugreg[7] |= (2 << (n * 2)) |
1981 (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
1982 ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
1987 static bool host_supports_vmx(void)
1989 uint32_t ecx, unused;
1991 host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
1992 return ecx & CPUID_EXT_VMX;
1995 #define VMX_INVALID_GUEST_STATE 0x80000021
1997 int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run)
2002 switch (run->exit_reason) {
2004 DPRINTF("handle_hlt\n");
2005 ret = kvm_handle_halt(env);
2007 case KVM_EXIT_SET_TPR:
2010 case KVM_EXIT_TPR_ACCESS:
2011 ret = kvm_handle_tpr_access(env);
2013 case KVM_EXIT_FAIL_ENTRY:
2014 code = run->fail_entry.hardware_entry_failure_reason;
2015 fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
2017 if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
2019 "\nIf you're running a guest on an Intel machine without "
2020 "unrestricted mode\n"
2021 "support, the failure can be most likely due to the guest "
2022 "entering an invalid\n"
2023 "state for Intel VT. For example, the guest maybe running "
2024 "in big real mode\n"
2025 "which is not supported on less recent Intel processors."
2030 case KVM_EXIT_EXCEPTION:
2031 fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
2032 run->ex.exception, run->ex.error_code);
2035 case KVM_EXIT_DEBUG:
2036 DPRINTF("kvm_exit_debug\n");
2037 ret = kvm_handle_debug(&run->debug.arch);
2039 default:
2040 fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
2048 bool kvm_arch_stop_on_emulation_error(CPUX86State *env)
2050 kvm_cpu_synchronize_state(env);
2051 return !(env->cr[0] & CR0_PE_MASK) ||
2052 ((env->segs[R_CS].selector & 3) != 3);
2055 void kvm_arch_init_irq_routing(KVMState *s)
2057 if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
2058 /* If the kernel can't do IRQ routing, the interrupt source
2059 * override 0->2 cannot be set up as required by HPET.
2060 * So we have to disable it.
2061 */
2064 /* We know at this point that we're using the in-kernel
2065 * irqchip, so we can use irqfds, and on x86 we know
2066 * we can use msi via irqfd and GSI routing.
2067 */
2068 kvm_irqfds_allowed = true;
2069 kvm_msi_via_irqfd_allowed = true;
2070 kvm_gsi_routing_allowed = true;
2073 /* Classic KVM device assignment interface. Will remain x86 only. */
2074 int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
2075 uint32_t flags, uint32_t *dev_id)
2077 struct kvm_assigned_pci_dev dev_data = {
2078 .segnr = dev_addr->domain,
2079 .busnr = dev_addr->bus,
2080 .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
2085 dev_data.assigned_dev_id =
2086 (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;
2088 ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
2093 *dev_id = dev_data.assigned_dev_id;
2098 int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
2100 struct kvm_assigned_pci_dev dev_data = {
2101 .assigned_dev_id = dev_id,
2104 return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
2107 static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
2108 uint32_t irq_type, uint32_t guest_irq)
2110 struct kvm_assigned_irq assigned_irq = {
2111 .assigned_dev_id = dev_id,
2112 .guest_irq = guest_irq,
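/* Prefer KVM_ASSIGN_DEV_IRQ when the capability is present; very
 * old kernels only understand the legacy KVM_ASSIGN_IRQ ioctl. */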
2116 if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
2117 return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
2119 return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
2123 int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
2126 uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
2127 (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);
2129 return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
2132 int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
2134 struct kvm_assigned_pci_dev dev_data = {
2135 .assigned_dev_id = dev_id,
2136 .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
2139 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
2142 static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
2145 struct kvm_assigned_irq assigned_irq = {
2146 .assigned_dev_id = dev_id,
2150 return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
2153 int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
2155 return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
2156 (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
2159 int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
2161 return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
2162 KVM_DEV_IRQ_GUEST_MSI, virq);
2165 int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
2167 return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
2168 KVM_DEV_IRQ_HOST_MSI);
2171 bool kvm_device_msix_supported(KVMState *s)
2173 /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
2174 * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
2175 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
2178 int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
2179 uint32_t nr_vectors)
2181 struct kvm_assigned_msix_nr msix_nr = {
2182 .assigned_dev_id = dev_id,
2183 .entry_nr = nr_vectors,
2186 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
2189 int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
2192 struct kvm_assigned_msix_entry msix_entry = {
2193 .assigned_dev_id = dev_id,
2198 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
2201 int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
2203 return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
2204 KVM_DEV_IRQ_GUEST_MSIX, 0);
2207 int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
2209 return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
2210 KVM_DEV_IRQ_HOST_MSIX);