// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#ifndef __ARCH_X86_KVM_XEN_H__
#define __ARCH_X86_KVM_XEN_H__

#include <asm/xen/hypervisor.h>

#ifdef CONFIG_KVM_XEN
#include <linux/jump_label_ratelimit.h>

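/*
 * kvm_xen_enabled is a deferred static key gating all of the inline fast
 * paths below; it is presumably flipped on when userspace configures Xen
 * support (e.g. through kvm_xen_hvm_config()), so hosts that never enable
 * Xen emulation only pay for a patched-out branch.
 */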
extern struct static_key_false_deferred kvm_xen_enabled;

int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *vcpu);
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *evt);
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_init_vm(struct kvm *kvm);
void kvm_xen_destroy_vm(struct kvm *kvm);
void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu);
void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu);
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe,
                            struct kvm *kvm);
int kvm_xen_setup_evtchn(struct kvm *kvm,
                         struct kvm_kernel_irq_routing_entry *e,
                         const struct kvm_irq_routing_entry *ue);
void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu);

static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
{
        /*
         * The local APIC is being enabled. If the per-vCPU upcall vector is
         * set and the vCPU's evtchn_upcall_pending flag is set, inject the
         * interrupt.
         */
        if (static_branch_unlikely(&kvm_xen_enabled.key) &&
            vcpu->arch.xen.vcpu_info_cache.active &&
            vcpu->arch.xen.upcall_vector && __kvm_xen_has_interrupt(vcpu))
                kvm_xen_inject_vcpu_vector(vcpu);
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
        return static_branch_unlikely(&kvm_xen_enabled.key) &&
                kvm->arch.xen_hvm_config.msr;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
        return static_branch_unlikely(&kvm_xen_enabled.key) &&
                (kvm->arch.xen_hvm_config.flags &
                 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
        if (static_branch_unlikely(&kvm_xen_enabled.key) &&
            vcpu->arch.xen.vcpu_info_cache.active &&
            vcpu->kvm->arch.xen.upcall_vector)
                return __kvm_xen_has_interrupt(vcpu);

        return 0;
}

static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
        return static_branch_unlikely(&kvm_xen_enabled.key) &&
                vcpu->arch.xen.evtchn_pending_sel;
}

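/*
 * timer_virq is (presumably) the event channel port that userspace bound to
 * this vCPU's Xen PV timer via the KVM_XEN_VCPU_ATTR_TYPE_TIMER attribute;
 * a non-zero port is what marks the emulated timer as enabled.
 */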
static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
{
        return !!vcpu->arch.xen.timer_virq;
}

static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
{
        if (kvm_xen_hypercall_enabled(vcpu->kvm) && kvm_xen_timer_enabled(vcpu))
                return atomic_read(&vcpu->arch.xen.timer_pending);

        return 0;
}

void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu);
#else
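/*
 * Stubs for builds without CONFIG_KVM_XEN: the hypercall page write reports
 * failure and the remaining helpers behave as if Xen support were disabled.
 */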
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
        return 1;
}

static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
        return false;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
        return false;
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
        return 0;
}

static inline void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
        return false;
}

static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return 0;
}

static inline void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
{
        return false;
}

static inline void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu)
{
}
#endif

int kvm_xen_hypercall(struct kvm_vcpu *vcpu);

#include <asm/pvclock-abi.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

void kvm_xen_update_runstate(struct kvm_vcpu *vcpu, int state);

static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
        kvm_xen_update_runstate(vcpu, RUNSTATE_running);
}

static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
{
        /*
         * If the vCPU wasn't preempted but took a normal exit for
         * some reason (hypercalls, I/O, etc.), that is accounted as
         * still RUNSTATE_running, as the VMM is still operating on
         * behalf of the vCPU. Only if the VMM does actually block
         * does it need to enter RUNSTATE_blocked.
         */
        if (WARN_ON_ONCE(!vcpu->preempted))
                return;

        kvm_xen_update_runstate(vcpu, RUNSTATE_runnable);
}

/* 32-bit compatibility definitions, also used natively in 32-bit build */
struct compat_arch_vcpu_info {
        unsigned int cr2;
        unsigned int pad[5];
};

struct compat_vcpu_info {
        uint8_t evtchn_upcall_pending;
        uint8_t evtchn_upcall_mask;
        uint16_t pad;
        uint32_t evtchn_pending_sel;
        struct compat_arch_vcpu_info arch;
        struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */

struct compat_arch_shared_info {
        unsigned int max_pfn;
        unsigned int pfn_to_mfn_frame_list_list;
        unsigned int nmi_reason;
        unsigned int p2m_cr3;
        unsigned int p2m_vaddr;
        unsigned int p2m_generation;
        uint32_t wc_sec_hi;
};

struct compat_shared_info {
        struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
        uint32_t evtchn_pending[32];
        uint32_t evtchn_mask[32];
        struct pvclock_wall_clock wc;
        struct compat_arch_shared_info arch;
};

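/*
 * COMPAT_EVTCHN_2L_NR_CHANNELS below works out to 8 bits per byte times the
 * 128-byte evtchn_pending bitmap above, i.e. 8 * 32 * sizeof(uint32_t) =
 * 1024 event channels for 32-bit (2-level) guests.
 */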
#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 * \
                                      sizeof_field(struct compat_shared_info, \
                                                   evtchn_pending))
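
/*
 * compat_vcpu_runstate_info is packed, presumably so that a 64-bit build
 * matches the 32-bit guest layout: state_entry_time lands at offset 4 and
 * the structure is 44 bytes, versus offset 8 and 48 bytes for the native
 * 64-bit vcpu_runstate_info.
 */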
struct compat_vcpu_runstate_info {
        int state;
        uint64_t state_entry_time;
        uint64_t time[4];
} __attribute__((packed));

struct compat_sched_poll {
        /* This is actually a guest virtual address which points to ports. */
        uint32_t ports;
        unsigned int nr_ports;
        uint64_t timeout;
};

#endif /* __ARCH_X86_KVM_XEN_H__ */