]> Git Repo - linux.git/blob - arch/x86/kvm/xen.h
KVM: x86/xen: Support direct injection of event channel events
[linux.git] / arch / x86 / kvm / xen.h
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
4  * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
5  *
6  * KVM Xen emulation
7  */
8
9 #ifndef __ARCH_X86_KVM_XEN_H__
10 #define __ARCH_X86_KVM_XEN_H__
11
12 #ifdef CONFIG_KVM_XEN
13 #include <linux/jump_label_ratelimit.h>
14
15 extern struct static_key_false_deferred kvm_xen_enabled;
16
17 int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
18 void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
19 int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
20 int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
21 int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
22 int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
23 int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *evt);
24 int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
25 int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
26 void kvm_xen_init_vm(struct kvm *kvm);
27 void kvm_xen_destroy_vm(struct kvm *kvm);
28 void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu);
29 int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe,
30                             struct kvm *kvm);
31 int kvm_xen_setup_evtchn(struct kvm *kvm,
32                          struct kvm_kernel_irq_routing_entry *e,
33                          const struct kvm_irq_routing_entry *ue);
34
35 static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
36 {
37         return static_branch_unlikely(&kvm_xen_enabled.key) &&
38                 kvm->arch.xen_hvm_config.msr;
39 }
40
41 static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
42 {
43         return static_branch_unlikely(&kvm_xen_enabled.key) &&
44                 (kvm->arch.xen_hvm_config.flags &
45                  KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
46 }
47
48 static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
49 {
50         if (static_branch_unlikely(&kvm_xen_enabled.key) &&
51             vcpu->arch.xen.vcpu_info_cache.active &&
52             vcpu->kvm->arch.xen.upcall_vector)
53                 return __kvm_xen_has_interrupt(vcpu);
54
55         return 0;
56 }
57
58 static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
59 {
60         return static_branch_unlikely(&kvm_xen_enabled.key) &&
61                 vcpu->arch.xen.evtchn_pending_sel;
62 }
63
64 #else
/*
 * CONFIG_KVM_XEN=n stub: Xen emulation is compiled out, so report the
 * hypercall-page write as unhandled (non-zero).
 */
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	return 1;
}
69
/* CONFIG_KVM_XEN=n stub: nothing to initialize. */
static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}
73
/* CONFIG_KVM_XEN=n stub: nothing to tear down. */
static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}
77
/* CONFIG_KVM_XEN=n stub: no per-vCPU Xen state exists. */
static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
}
81
/* CONFIG_KVM_XEN=n stub: the Xen MSR can never be configured. */
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
}
86
/* CONFIG_KVM_XEN=n stub: hypercall interception can never be enabled. */
static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return false;
}
91
/* CONFIG_KVM_XEN=n stub: no Xen upcall interrupt can be pending. */
static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	return 0;
}
96
/* CONFIG_KVM_XEN=n stub: no event channel events to inject. */
static inline void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu)
{
}
100
/* CONFIG_KVM_XEN=n stub: no event channel events can be pending. */
static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return false;
}
105 #endif
106
107 int kvm_xen_hypercall(struct kvm_vcpu *vcpu);
108
109 #include <asm/pvclock-abi.h>
110 #include <asm/xen/interface.h>
111 #include <xen/interface/vcpu.h>
112
113 void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);
114
/* Report the vCPU as RUNSTATE_running in the guest's Xen runstate area. */
static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
}
119
120 static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
121 {
122         /*
123          * If the vCPU wasn't preempted but took a normal exit for
124          * some reason (hypercalls, I/O, etc.), that is accounted as
125          * still RUNSTATE_running, as the VMM is still operating on
126          * behalf of the vCPU. Only if the VMM does actually block
127          * does it need to enter RUNSTATE_blocked.
128          */
129         if (vcpu->preempted)
130                 kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
131 }
132
133 /* 32-bit compatibility definitions, also used natively in 32-bit build */
/*
 * 32-bit ABI layout of Xen's arch_vcpu_info; also used natively in a
 * 32-bit build. Field layout is ABI — do not reorder or resize.
 */
struct compat_arch_vcpu_info {
	unsigned int cr2;	/* guest CR2 (fault address) mirror */
	unsigned int pad[5];
};
138
/* 32-bit ABI layout of Xen's vcpu_info. Field layout is ABI. */
struct compat_vcpu_info {
	uint8_t evtchn_upcall_pending;	/* event delivery pending flag */
	uint8_t evtchn_upcall_mask;	/* event delivery masked flag */
	uint16_t pad;
	uint32_t evtchn_pending_sel;	/* bitmap selecting evtchn_pending words */
	struct compat_arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */
147
/*
 * 32-bit ABI layout of Xen's arch_shared_info. Field layout is ABI;
 * field semantics follow the Xen public interface (PV p2m bookkeeping
 * plus the high word of the wallclock seconds).
 */
struct compat_arch_shared_info {
	unsigned int max_pfn;
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;
};
157
/* 32-bit ABI layout of the Xen shared_info page. Field layout is ABI. */
struct compat_shared_info {
	struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
	uint32_t evtchn_pending[32];	/* 2-level event channel pending bits */
	uint32_t evtchn_mask[32];	/* 2-level event channel mask bits */
	struct pvclock_wall_clock wc;	/* wallclock (low 32 bits of seconds) */
	struct compat_arch_shared_info arch;
};
165
/* Number of event channels representable in the 32-bit 2-level ABI. */
#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 *                               \
                                      sizeof_field(struct compat_shared_info, \
                                                   evtchn_pending))

/*
 * 32-bit ABI layout of Xen's vcpu_runstate_info. Packed because the
 * 64-bit fields are only 4-byte aligned in the 32-bit ABI, giving a
 * total size of 44 bytes (vs. 48 with natural alignment).
 *
 * (Fix: the struct body was indented with spaces, inconsistent with the
 * kernel's tab convention used throughout this file.)
 */
struct compat_vcpu_runstate_info {
	int state;			/* RUNSTATE_* value */
	uint64_t state_entry_time;	/* when the current state was entered, ns */
	uint64_t time[4];		/* ns accumulated in each RUNSTATE_* */
} __attribute__((packed));
174
175 #endif /* __ARCH_X86_KVM_XEN_H__ */
This page took 0.045514 seconds and 4 git commands to generate.