/* arch/x86/kvm/xen.h */
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
4 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
5 *
6 * KVM Xen emulation
7 */
8
#ifndef __ARCH_X86_KVM_XEN_H__
#define __ARCH_X86_KVM_XEN_H__

#ifdef CONFIG_KVM_XEN
#include <linux/jump_label_ratelimit.h>

/*
 * Deferred static key; the inline fast paths below test it before
 * touching any Xen state, so the cost is near zero when Xen emulation
 * is not in use.
 */
extern struct static_key_false_deferred kvm_xen_enabled;

/* Slow path behind kvm_xen_has_interrupt(). */
int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
/* Per-vCPU and per-VM Xen attribute get/set. */
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
/* Direct injection of an event channel event. */
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *evt);
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
/* VM / vCPU lifecycle hooks. */
void kvm_xen_init_vm(struct kvm *kvm);
void kvm_xen_destroy_vm(struct kvm *kvm);
void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu);
/* Event channel delivery and irq routing setup. */
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe,
			    struct kvm *kvm);
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue);

30b5c851
DW
35static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
36{
37 return static_branch_unlikely(&kvm_xen_enabled.key) &&
38 kvm->arch.xen_hvm_config.msr;
39}
40
23200b7a
JM
41static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
42{
7d6bbebb
DW
43 return static_branch_unlikely(&kvm_xen_enabled.key) &&
44 (kvm->arch.xen_hvm_config.flags &
45 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
23200b7a
JM
46}
47
40da8ccd
DW
48static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
49{
50 if (static_branch_unlikely(&kvm_xen_enabled.key) &&
7caf9571
DW
51 vcpu->arch.xen.vcpu_info_cache.active &&
52 vcpu->kvm->arch.xen.upcall_vector)
40da8ccd
DW
53 return __kvm_xen_has_interrupt(vcpu);
54
55 return 0;
56}
7caf9571
DW
57
58static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
59{
60 return static_branch_unlikely(&kvm_xen_enabled.key) &&
61 vcpu->arch.xen.evtchn_pending_sel;
62}
63
#else
/*
 * CONFIG_KVM_XEN disabled: stub implementations so callers compile
 * without any Xen state.
 */
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	/* Non-zero return; presumably "not handled" — confirm at caller. */
	return 1;
}

/* No-op when CONFIG_KVM_XEN is disabled. */
static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}

/* No-op when CONFIG_KVM_XEN is disabled. */
static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}

/* No-op when CONFIG_KVM_XEN is disabled. */
static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
}

/* Always false when CONFIG_KVM_XEN is disabled. */
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
}

/* Always false when CONFIG_KVM_XEN is disabled. */
static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return false;
}

/* Never any Xen interrupt pending when CONFIG_KVM_XEN is disabled. */
static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	return 0;
}

/* No-op when CONFIG_KVM_XEN is disabled. */
static inline void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu)
{
}

/* Never any pending events when CONFIG_KVM_XEN is disabled. */
static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif

/* Hypercall entry point, available in both configurations. */
int kvm_xen_hypercall(struct kvm_vcpu *vcpu);


#include <asm/pvclock-abi.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);

/* Record this vCPU's runstate as RUNSTATE_running. */
static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
}

120static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
121{
122 /*
123 * If the vCPU wasn't preempted but took a normal exit for
124 * some reason (hypercalls, I/O, etc.), that is accounted as
125 * still RUNSTATE_running, as the VMM is still operating on
126 * behalf of the vCPU. Only if the VMM does actually block
127 * does it need to enter RUNSTATE_blocked.
128 */
129 if (vcpu->preempted)
130 kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
131}

/* 32-bit compatibility definitions, also used natively in 32-bit build */

/* 32-bit layout of the arch-specific part of vcpu_info; sizes are ABI. */
struct compat_arch_vcpu_info {
	unsigned int cr2;
	unsigned int pad[5];
};

/* 32-bit layout of the Xen vcpu_info structure; field sizes are ABI. */
struct compat_vcpu_info {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
	uint16_t pad;
	uint32_t evtchn_pending_sel;
	struct compat_arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */


/* 32-bit layout of the arch-specific part of shared_info. */
struct compat_arch_shared_info {
	unsigned int max_pfn;
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;
};


/* 32-bit layout of the Xen shared_info page contents. */
struct compat_shared_info {
	struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
	uint32_t evtchn_pending[32];
	uint32_t evtchn_mask[32];
	struct pvclock_wall_clock wc;
	struct compat_arch_shared_info arch;
};


/*
 * Number of event channels representable in the compat 2-level ABI:
 * one bit per channel in the evtchn_pending bitmap.
 */
#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 * \
				      sizeof_field(struct compat_shared_info, \
						   evtchn_pending))
/*
 * 32-bit layout of vcpu_runstate_info. Packed — presumably to match the
 * 32-bit guest's alignment of the uint64_t members after the int state
 * field; confirm against the Xen public headers before changing.
 */
struct compat_vcpu_runstate_info {
	int state;
	uint64_t state_entry_time;
	uint64_t time[4];
} __attribute__((packed));

#endif /* __ARCH_X86_KVM_XEN_H__ */