]> Git Repo - linux.git/blame - arch/x86/kvm/xen.h
KVM: x86/xen: Add KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID
[linux.git] / arch / x86 / kvm / xen.h
CommitLineData
23200b7a
JM
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
4 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
5 *
6 * KVM Xen emulation
7 */
8
9#ifndef __ARCH_X86_KVM_XEN_H__
10#define __ARCH_X86_KVM_XEN_H__
11
b59b153d 12#ifdef CONFIG_KVM_XEN
7d6bbebb
DW
13#include <linux/jump_label_ratelimit.h>
14
15extern struct static_key_false_deferred kvm_xen_enabled;
16
40da8ccd 17int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
7caf9571 18void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
3e324615
DW
19int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
20int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
a76b9641
JM
21int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
22int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
35025735 23int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *evt);
23200b7a 24int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
78e9878c 25int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
319afe68 26void kvm_xen_init_vm(struct kvm *kvm);
7d6bbebb 27void kvm_xen_destroy_vm(struct kvm *kvm);
942c2490 28void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu);
a795cd43 29void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu);
8733068b 30int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe,
14243b38
DW
31 struct kvm *kvm);
32int kvm_xen_setup_evtchn(struct kvm *kvm,
33 struct kvm_kernel_irq_routing_entry *e,
34 const struct kvm_irq_routing_entry *ue);
35
30b5c851
DW
36static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
37{
38 return static_branch_unlikely(&kvm_xen_enabled.key) &&
39 kvm->arch.xen_hvm_config.msr;
40}
41
23200b7a
JM
42static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
43{
7d6bbebb
DW
44 return static_branch_unlikely(&kvm_xen_enabled.key) &&
45 (kvm->arch.xen_hvm_config.flags &
46 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
23200b7a
JM
47}
48
40da8ccd
DW
49static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
50{
51 if (static_branch_unlikely(&kvm_xen_enabled.key) &&
7caf9571
DW
52 vcpu->arch.xen.vcpu_info_cache.active &&
53 vcpu->kvm->arch.xen.upcall_vector)
40da8ccd
DW
54 return __kvm_xen_has_interrupt(vcpu);
55
56 return 0;
57}
7caf9571
DW
58
59static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
60{
61 return static_branch_unlikely(&kvm_xen_enabled.key) &&
62 vcpu->arch.xen.evtchn_pending_sel;
63}
64
b59b153d
PB
65#else
/*
 * !CONFIG_KVM_XEN stub: returns 1 (non-zero), presumably so the MSR
 * write is treated as unhandled by the caller — confirm against the
 * call site in x86.c.
 */
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	return 1;
}
70
319afe68
PB
/* !CONFIG_KVM_XEN stub: no per-VM Xen state to initialize. */
static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}
74
b59b153d
PB
/* !CONFIG_KVM_XEN stub: no per-VM Xen state to tear down. */
static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}
78
942c2490
DW
/* !CONFIG_KVM_XEN stub: no per-vCPU Xen state to initialize. */
static inline void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
}
82
a795cd43
DW
/* !CONFIG_KVM_XEN stub: no per-vCPU Xen state to tear down. */
static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
}
86
30b5c851
DW
/* !CONFIG_KVM_XEN stub: the Xen hypercall MSR can never be enabled. */
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
}
91
b59b153d
PB
/* !CONFIG_KVM_XEN stub: Xen hypercall interception is never enabled. */
static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return false;
}
96
/* !CONFIG_KVM_XEN stub: no Xen upcall interrupt can ever be pending. */
static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	return 0;
}
7caf9571
DW
101
/* !CONFIG_KVM_XEN stub: nothing to inject. */
static inline void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu)
{
}
105
/* !CONFIG_KVM_XEN stub: no Xen event channel events can be pending. */
static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return false;
}
b59b153d
PB
110#endif
111
112int kvm_xen_hypercall(struct kvm_vcpu *vcpu);
1ea9f2ed 113
1ea9f2ed
DW
114#include <asm/pvclock-abi.h>
115#include <asm/xen/interface.h>
30b5c851 116#include <xen/interface/vcpu.h>
1ea9f2ed 117
30b5c851
DW
118void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);
119
/* Mark the vCPU as RUNSTATE_running in the guest-visible runstate area. */
static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
}
124
/* Downgrade the guest-visible runstate only if the vCPU was truly preempted. */
static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
{
	/*
	 * If the vCPU wasn't preempted but took a normal exit for
	 * some reason (hypercalls, I/O, etc.), that is accounted as
	 * still RUNSTATE_running, as the VMM is still operating on
	 * behalf of the vCPU. Only if the VMM does actually block
	 * does it need to enter RUNSTATE_blocked.
	 */
	if (vcpu->preempted)
		kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
}
137
138/* 32-bit compatibility definitions, also used natively in 32-bit build */
1ea9f2ed
DW
/*
 * 32-bit layout of Xen's arch_vcpu_info: `unsigned int` fields where the
 * native x86-64 ABI would use `unsigned long`.  Do not reorder or resize
 * fields — this must match what a 32-bit guest expects.
 */
struct compat_arch_vcpu_info {
	unsigned int cr2;
	unsigned int pad[5];
};
143
/*
 * 32-bit layout of Xen's vcpu_info, embedded in compat_shared_info and
 * written into guest memory — field order and sizes are guest ABI.
 */
struct compat_vcpu_info {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
	uint16_t pad;
	uint32_t evtchn_pending_sel;
	struct compat_arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */
152
/*
 * 32-bit layout of Xen's arch_shared_info (guest ABI — do not alter
 * field order or widths).
 */
struct compat_arch_shared_info {
	unsigned int max_pfn;
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;
};
162
/*
 * 32-bit layout of Xen's shared_info page as seen by a compat guest:
 * per-vCPU info slots followed by the 2-level event channel bitmaps,
 * wallclock, and arch-specific fields.  Guest ABI — layout is fixed.
 */
struct compat_shared_info {
	struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
	uint32_t evtchn_pending[32];
	uint32_t evtchn_mask[32];
	struct pvclock_wall_clock wc;
	struct compat_arch_shared_info arch;
};
170
14243b38
DW
/*
 * Number of event channels addressable via the 2-level ABI for a compat
 * guest: one bit per channel in the shared_info evtchn_pending bitmap
 * (8 bits per byte of the array).
 */
#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 * \
				      sizeof_field(struct compat_shared_info, \
						   evtchn_pending))
30b5c851
DW
/*
 * 32-bit layout of Xen's vcpu_runstate_info.  Packed, presumably so the
 * uint64_t members get the 32-bit guest's 4-byte alignment rather than
 * the host's 8-byte alignment — confirm against the native struct's use
 * in xen.c before changing.
 */
struct compat_vcpu_runstate_info {
	int state;
	uint64_t state_entry_time;
	uint64_t time[4];
} __attribute__((packed));
179
23200b7a 180#endif /* __ARCH_X86_KVM_XEN_H__ */
This page took 0.166813 seconds and 4 git commands to generate.