/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#ifndef __ARCH_X86_KVM_HYPERV_H__
#define __ARCH_X86_KVM_HYPERV_H__

#include <linux/kvm_host.h>

#include "x86.h"

27 /* "Hv#1" signature */
#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648

/*
 * The #defines related to the synthetic debugger are required by KDNet, but
 * they are not documented in the Hyper-V TLFS because the synthetic debugger
 * functionality has been deprecated and is subject to removal in future
 * versions of Windows.
 */
#define HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS 0x40000080
#define HYPERV_CPUID_SYNDBG_INTERFACE 0x40000081
#define HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES 0x40000082

/*
 * Hyper-V synthetic debugger platform capabilities
 * These are HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX bits.
 */
#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING BIT(1)

/* Hyper-V Synthetic debug options MSR */
#define HV_X64_MSR_SYNDBG_CONTROL 0x400000F1
#define HV_X64_MSR_SYNDBG_STATUS 0x400000F2
#define HV_X64_MSR_SYNDBG_SEND_BUFFER 0x400000F3
#define HV_X64_MSR_SYNDBG_RECV_BUFFER 0x400000F4
#define HV_X64_MSR_SYNDBG_PENDING_BUFFER 0x400000F5
#define HV_X64_MSR_SYNDBG_OPTIONS 0x400000FF

/* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */
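/*
 * When set, the guest's debugger transport exchanges debug data via the
 * debug hypercalls (e.g. HVCALL_POST_DEBUG_DATA) rather than through the
 * SYNDBG buffer MSRs above.
 */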
#define HV_X64_SYNDBG_OPTION_USE_HCALLS BIT(2)

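/*
 * Accessors for the Hyper-V emulation context hanging off struct kvm and
 * struct kvm_vcpu. The per-vCPU context is allocated lazily, so
 * to_hv_vcpu() may return NULL; callers that can run before the context
 * exists must check for that.
 */
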
static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm)
{
	return &kvm->arch.hyperv;
}

static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv;
}

static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return &hv_vcpu->synic;
}

static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
{
	struct kvm_vcpu_hv *hv_vcpu = container_of(synic, struct kvm_vcpu_hv, synic);

	return hv_vcpu->vcpu;
}

static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu)
{
	return &vcpu->kvm->arch.hyperv.hv_syndbg;
}

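/*
 * The Hyper-V VP index shadows KVM's vcpu_idx by default; the VMM can
 * remap it through HV_X64_MSR_VP_INDEX, so fall back to vcpu_idx only
 * when no Hyper-V context has been allocated for the vCPU.
 */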
static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);

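/*
 * Per the TLFS, a guest has to report its identity via
 * HV_X64_MSR_GUEST_OS_ID before it can enable the hypercall page, so
 * hypercalls are only served once hv_guest_os_id is non-zero.
 */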
static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;
}

int kvm_hv_hypercall(struct kvm_vcpu *vcpu);

void kvm_hv_irq_routing_update(struct kvm *kvm);
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);

bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu);

static inline struct kvm_vcpu_hv_stimer *to_hv_stimer(struct kvm_vcpu *vcpu,
						      int timer_index)
{
	return &to_hv_vcpu(vcpu)->stimer[timer_index];
}

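/*
 * stimer->index is the timer's slot in the stimer[] array, so subtracting
 * it from the timer's address yields &stimer[0], from which container_of()
 * can recover the enclosing struct kvm_vcpu_hv.
 */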
static inline struct kvm_vcpu *hv_stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu_hv *hv_vcpu;

	hv_vcpu = container_of(stimer - stimer->index, struct kvm_vcpu_hv,
			       stimer[0]);
	return hv_vcpu->vcpu;
}

static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (!hv_vcpu)
		return false;

	return !bitmap_empty(hv_vcpu->stimer_pending_bitmap,
			     HV_SYNIC_STIMER_COUNT);
}

/*
 * With the HV_ACCESS_TSC_INVARIANT feature, invariant TSC
 * (CPUID.80000007H:EDX[8]) is only observed after
 * HV_X64_MSR_TSC_INVARIANT_CONTROL has been written to.
 */
static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	/*
	 * If Hyper-V's invariant TSC control is not exposed to the guest,
	 * the invariant TSC CPUID flag is not suppressed; Windows guests were
	 * observed to handle it correctly. Going forward, VMMs are
	 * encouraged to enable Hyper-V's invariant TSC control when the
	 * invariant TSC CPUID flag is set, to make KVM's behavior match
	 * genuine Hyper-V.
	 */
	if (!hv_vcpu ||
	    !(hv_vcpu->cpuid_cache.features_eax & HV_ACCESS_TSC_INVARIANT))
		return false;

	/*
	 * If Hyper-V's invariant TSC control is exposed to the guest, KVM is
	 * responsible for suppressing the invariant TSC CPUID flag if the
	 * Hyper-V control is not enabled.
	 */
	return !(to_kvm_hv(vcpu->kvm)->hv_invtsc_control & HV_EXPOSE_INVARIANT_TSC);
}

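/*
 * Note: while kvm_hv_invtsc_suppressed() returns true, KVM's CPUID
 * emulation is expected to hide CPUID.80000007H:EDX[8] from the guest.
 */
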
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);

void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock);
void kvm_hv_request_tsc_page_update(struct kvm *kvm);

void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled);
int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		     struct kvm_cpuid_entry2 __user *entries);

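/*
 * Hyper-V TLB flush hypercalls are buffered per vCPU in two kfifos: one
 * for requests targeting L1 and one for requests targeting L2 (nested),
 * to be drained by kvm_hv_vcpu_flush_tlb() when KVM_REQ_HV_TLB_FLUSH is
 * serviced.
 */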
static inline struct kvm_vcpu_hv_tlb_flush_fifo *kvm_hv_get_tlb_flush_fifo(struct kvm_vcpu *vcpu,
									    bool is_guest_mode)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	int i = is_guest_mode ? HV_L2_TLB_FLUSH_FIFO :
				HV_L1_TLB_FLUSH_FIFO;

	return &hv_vcpu->tlb_flush_fifo[i];
}

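/*
 * Drop all queued flush entries when something else (e.g. a full guest
 * TLB flush) has made the fine-grained flushes unnecessary.
 */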
static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;

	if (!to_hv_vcpu(vcpu) || !kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
		return;

	tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));

	kfifo_reset_out(&tlb_flush_fifo->entries);
}

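/*
 * True when the guest's CPUID advertises HV_X64_NESTED_DIRECT_FLUSH,
 * i.e. TLB flush hypercalls issued on behalf of L2 may be serviced
 * directly.
 */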
static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu &&
	       (hv_vcpu->cpuid_cache.nested_eax & HV_X64_NESTED_DIRECT_FLUSH);
}

static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u16 code;

	if (!hv_vcpu)
		return false;

	/* The call code sits in the low 16 bits of RCX (64-bit) or EAX (32-bit). */
	code = is_64_bit_hypercall(vcpu) ? kvm_rcx_read(vcpu) :
					   kvm_rax_read(vcpu);

	return (code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX);
}

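/*
 * Refresh the cached VP assist page; nothing to do (return 0) when no
 * Hyper-V context exists or the guest has not enabled the assist page.
 */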
static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
{
	if (!to_hv_vcpu(vcpu))
		return 0;

	if (!kvm_hv_assist_page_enabled(vcpu))
		return 0;

	return kvm_hv_get_assist_page(vcpu);
}

int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);

#endif