/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>

#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>

#define KVM_MAX_VCPUS 255
#define KVM_SOFT_MAX_VCPUS 160
#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
#define KVM_HALT_POLL_NS_DEFAULT 500000

#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

#define CR0_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
#define CR3_PCID_INVD            BIT_64(63)
#define CR4_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
                          | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
                          | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
#define KVM_NR_PAGE_SIZES       3
#define KVM_HPAGE_GFN_SHIFT(x)  (((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)      (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)       (1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)       (~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)  (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
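/*
 * Worked example (illustrative, assuming the usual PAGE_SHIFT of 12):
 * level 1 is a 4 KiB page, level 2 a 2 MiB page and level 3 a 1 GiB page,
 * so KVM_HPAGE_SIZE(2) == 1UL << 21 and KVM_PAGES_PER_HPAGE(2) == 512.
 */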

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
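/*
 * Illustrative only: with 2 MiB pages (level 2), gfn_to_index() returns the
 * slot-relative index of the 512-page region containing @gfn, e.g. for a
 * memslot starting at base_gfn 0x400,
 * gfn_to_index(0x700, 0x400, 2) == (0x700 >> 9) - (0x400 >> 9) == 1.
 */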

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
        VCPU_REGS_RAX = 0,
        VCPU_REGS_RCX = 1,
        VCPU_REGS_RDX = 2,
        VCPU_REGS_RBX = 3,
        VCPU_REGS_RSP = 4,
        VCPU_REGS_RBP = 5,
        VCPU_REGS_RSI = 6,
        VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
        VCPU_REGS_R8 = 8,
        VCPU_REGS_R9 = 9,
        VCPU_REGS_R10 = 10,
        VCPU_REGS_R11 = 11,
        VCPU_REGS_R12 = 12,
        VCPU_REGS_R13 = 13,
        VCPU_REGS_R14 = 14,
        VCPU_REGS_R15 = 15,
#endif
        VCPU_REGS_RIP,
        NR_VCPU_REGS
};

enum kvm_reg_ex {
        VCPU_EXREG_PDPTR = NR_VCPU_REGS,
        VCPU_EXREG_CR3,
        VCPU_EXREG_RFLAGS,
        VCPU_EXREG_SEGMENTS,
};

enum {
        VCPU_SREG_ES,
        VCPU_SREG_CS,
        VCPU_SREG_SS,
        VCPU_SREG_DS,
        VCPU_SREG_FS,
        VCPU_SREG_GS,
        VCPU_SREG_TR,
        VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS  4

#define DR6_BD          (1 << 13)
#define DR6_BS          (1 << 14)
#define DR6_RTM         (1 << 16)
#define DR6_FIXED_1     0xfffe0ff0
#define DR6_INIT        0xffff0ff0
#define DR6_VOLATILE    0x0001e00f

#define DR7_BP_EN_MASK  0x000000ff
#define DR7_GE          (1 << 9)
#define DR7_GD          (1 << 13)
#define DR7_FIXED_1     0x00000400
#define DR7_VOLATILE    0xffff2bff

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC    0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes by the guest by comparing
 * this bit with PV-EOI in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING 1

struct kvm_kernel_irq_routing_entry;

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
        int nobjs;
        void *objects[KVM_NR_MEM_OBJS];
};
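/*
 * A minimal sketch of how such a cache is consumed (illustrative only; the
 * real topup/alloc helpers live in arch/x86/kvm/mmu.c).  Objects are
 * preallocated before mmu_lock is taken, so popping one cannot fail:
 *
 *	static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 *	{
 *		BUG_ON(!mc->nobjs);
 *		return mc->objects[--mc->nobjs];
 *	}
 */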

union kvm_mmu_page_role {
        unsigned word;
        struct {
                unsigned level:4;
                unsigned cr4_pae:1;
                unsigned quadrant:2;
                unsigned direct:1;
                unsigned access:3;
                unsigned invalid:1;
                unsigned nxe:1;
                unsigned cr0_wp:1;
                unsigned smep_andnot_wp:1;
                unsigned smap_andnot_wp:1;
                unsigned :8;

                /*
                 * This is left at the top of the word so that
                 * kvm_memslots_for_spte_role can extract it with a
                 * simple shift.  While there is room, give it a whole
                 * byte so it is also faster to load it from memory.
                 */
                unsigned smm:8;
        };
};
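/*
 * Layout check (illustrative): the named bitfields above occupy 16 bits and
 * the anonymous padding another 8, so @smm lands in bits [31:24] of @word;
 * (role.word >> 24) therefore recovers it with the single shift mentioned
 * in the comment above.
 */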

struct kvm_mmu_page {
        struct list_head link;
        struct hlist_node hash_link;

        /*
         * The following two entries are used to key the shadow page in the
         * hash table.
         */
        gfn_t gfn;
        union kvm_mmu_page_role role;

        u64 *spt;
        /* hold the gfn of each spte inside spt */
        gfn_t *gfns;
        bool unsync;
        int root_count;          /* Currently serving as active root */
        unsigned int unsync_children;
        unsigned long parent_ptes;      /* Reverse mapping for parent_pte */

        /* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen.  */
        unsigned long mmu_valid_gen;

        DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
        /*
         * Used out of the mmu-lock to avoid reading spte values while an
         * update is in progress; see the comments in __get_spte_lockless().
         */
        int clear_spte_count;
#endif

        /* Number of writes since the last time traversal visited this page.  */
        int write_flooding_count;
};

struct kvm_pio_request {
        unsigned long count;
        int in;
        int port;
        int size;
};

struct rsvd_bits_validate {
        u64 rsvd_bits_mask[2][4];
        u64 bad_mt_xwr;
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
        unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
        u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
                          bool prefault);
        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
                                  struct x86_exception *fault);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
                            struct x86_exception *exception);
        gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                               struct x86_exception *exception);
        int (*sync_page)(struct kvm_vcpu *vcpu,
                         struct kvm_mmu_page *sp);
        void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
        void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                           u64 *spte, const void *pte);
        hpa_t root_hpa;
        int root_level;
        int shadow_root_level;
        union kvm_mmu_page_role base_role;
        bool direct_map;

        /*
         * Bitmap; bit set = permission fault
         * Byte index: page fault error code [4:1]
         * Bit index: pte permissions in ACC_* format
         */
        u8 permissions[16];
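        /*
         * Example lookup (illustrative, ignoring the SMAP adjustment made by
         * the real permission_fault() helper): for page fault error code
         * @pfec and a pte's ACC_* bits @pte_access,
         *	fault = (permissions[pfec >> 1] >> pte_access) & 1;
         */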

        u64 *pae_root;
        u64 *lm_root;

        /*
         * check zero bits on shadow page table entries, these
         * bits include not only hardware reserved bits but also
         * the bits spte never used.
         */
        struct rsvd_bits_validate shadow_zero_check;

        struct rsvd_bits_validate guest_rsvd_check;

        /*
         * Bitmap: bit set = last pte in walk
         * index[0:1]: level (zero-based)
         * index[2]: pte.ps
         */
        u8 last_pte_bitmap;
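        /*
         * Illustrative decoding: the bit tested for a walk at @level over a
         * guest pte @gpte is roughly
         *	1 << ((level - 1) | (!!(gpte & PT_PAGE_SIZE_MASK) << 2));
         * see is_last_gpte() for the real helper.
         */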

        bool nx;

        u64 pdptrs[4]; /* pae */
};

enum pmc_type {
        KVM_PMC_GP = 0,
        KVM_PMC_FIXED,
};

struct kvm_pmc {
        enum pmc_type type;
        u8 idx;
        u64 counter;
        u64 eventsel;
        struct perf_event *perf_event;
        struct kvm_vcpu *vcpu;
};

struct kvm_pmu {
        unsigned nr_arch_gp_counters;
        unsigned nr_arch_fixed_counters;
        unsigned available_event_types;
        u64 fixed_ctr_ctrl;
        u64 global_ctrl;
        u64 global_status;
        u64 global_ovf_ctrl;
        u64 counter_bitmask[2];
        u64 global_ctrl_mask;
        u64 reserved_bits;
        u8 version;
        struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
        struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
        struct irq_work irq_work;
        u64 reprogram_pmi;
};

struct kvm_pmu_ops;

enum {
        KVM_DEBUGREG_BP_ENABLED = 1,
        KVM_DEBUGREG_WONT_EXIT = 2,
        KVM_DEBUGREG_RELOAD = 4,
};

struct kvm_mtrr_range {
        u64 base;
        u64 mask;
        struct list_head node;
};

struct kvm_mtrr {
        struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
        mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
        u64 deftype;

        struct list_head head;
};

/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
        u64 hv_vapic;
        s64 runtime_offset;
};

struct kvm_vcpu_arch {
        /*
         * rip and regs accesses must go through
         * kvm_{register,rip}_{read,write} functions.
         */
        unsigned long regs[NR_VCPU_REGS];
        u32 regs_avail;
        u32 regs_dirty;

        unsigned long cr0;
        unsigned long cr0_guest_owned_bits;
        unsigned long cr2;
        unsigned long cr3;
        unsigned long cr4;
        unsigned long cr4_guest_owned_bits;
        unsigned long cr8;
        u32 hflags;
        u64 efer;
        u64 apic_base;
        struct kvm_lapic *apic;    /* kernel irqchip context */
        u64 eoi_exit_bitmap[4];
        unsigned long apic_attention;
        int32_t apic_arb_prio;
        int mp_state;
        u64 ia32_misc_enable_msr;
        u64 smbase;
        bool tpr_access_reporting;
        u64 ia32_xss;

        /*
         * Paging state of the vcpu
         *
         * If the vcpu runs in guest mode with two level paging this still saves
         * the paging mode of the l1 guest. This context is always used to
         * handle faults.
         */
        struct kvm_mmu mmu;

        /*
         * Paging state of an L2 guest (used for nested npt)
         *
         * This context will save all necessary information to walk page tables
         * of an L2 guest. This context is only initialized for page table
         * walking and not for faulting since we never handle l2 page faults on
         * the host.
         */
        struct kvm_mmu nested_mmu;

        /*
         * Pointer to the mmu context currently used for
         * gva_to_gpa translations.
         */
        struct kvm_mmu *walk_mmu;

        struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
        struct kvm_mmu_memory_cache mmu_page_cache;
        struct kvm_mmu_memory_cache mmu_page_header_cache;

        struct fpu guest_fpu;
        bool eager_fpu;
        u64 xcr0;
        u64 guest_supported_xcr0;
        u32 guest_xstate_size;

        struct kvm_pio_request pio;
        void *pio_data;

        u8 event_exit_inst_len;

        struct kvm_queued_exception {
                bool pending;
                bool has_error_code;
                bool reinject;
                u8 nr;
                u32 error_code;
        } exception;

        struct kvm_queued_interrupt {
                bool pending;
                bool soft;
                u8 nr;
        } interrupt;

        int halt_request; /* real mode on Intel only */

        int cpuid_nent;
        struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

        int maxphyaddr;

        /* emulate context */

        struct x86_emulate_ctxt emulate_ctxt;
        bool emulate_regs_need_sync_to_vcpu;
        bool emulate_regs_need_sync_from_vcpu;
        int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

        gpa_t time;
        struct pvclock_vcpu_time_info hv_clock;
        unsigned int hw_tsc_khz;
        struct gfn_to_hva_cache pv_time;
        bool pv_time_enabled;
        /* set guest stopped flag in pvclock flags field */
        bool pvclock_set_guest_stopped_request;

        struct {
                u64 msr_val;
                u64 last_steal;
                u64 accum_steal;
                struct gfn_to_hva_cache stime;
                struct kvm_steal_time steal;
        } st;

        u64 last_guest_tsc;
        u64 last_host_tsc;
        u64 tsc_offset_adjustment;
        u64 this_tsc_nsec;
        u64 this_tsc_write;
        u64 this_tsc_generation;
        bool tsc_catchup;
        bool tsc_always_catchup;
        s8 virtual_tsc_shift;
        u32 virtual_tsc_mult;
        u32 virtual_tsc_khz;
        s64 ia32_tsc_adjust_msr;

        atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
        unsigned nmi_pending; /* NMI queued after currently running handler */
        bool nmi_injected;    /* Trying to inject an NMI this entry */
        bool smi_pending;     /* SMI queued after currently running handler */

        struct kvm_mtrr mtrr_state;
        u64 pat;

        unsigned switch_db_regs;
        unsigned long db[KVM_NR_DB_REGS];
        unsigned long dr6;
        unsigned long dr7;
        unsigned long eff_db[KVM_NR_DB_REGS];
        unsigned long guest_debug_dr7;

        u64 mcg_cap;
        u64 mcg_status;
        u64 mcg_ctl;
        u64 *mce_banks;

        /* Cache MMIO info */
        u64 mmio_gva;
        unsigned access;
        gfn_t mmio_gfn;
        u64 mmio_gen;

        struct kvm_pmu pmu;

        /* used for guest single stepping over the given code position */
        unsigned long singlestep_rip;

        struct kvm_vcpu_hv hyperv;

        cpumask_var_t wbinvd_dirty_mask;

        unsigned long last_retry_eip;
        unsigned long last_retry_addr;

        struct {
                bool halted;
                gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
                struct gfn_to_hva_cache data;
                u64 msr_val;
                u32 id;
                bool send_user_only;
        } apf;

        /* OSVW MSRs (AMD only) */
        struct {
                u64 length;
                u64 status;
        } osvw;

        struct {
                u64 msr_val;
                struct gfn_to_hva_cache data;
        } pv_eoi;

        /*
         * Indicates whether the access faulted on its guest page table;
         * set when fixing a page fault and used to detect unhandleable
         * instructions.
         */
        bool write_fault_to_shadow_pgtable;

        /* set at EPT violation at this point */
        unsigned long exit_qualification;

        /* pv related host specific info */
        struct {
                bool pv_unhalted;
        } pv;

        int pending_ioapic_eoi;
        int pending_external_vector;
};

struct kvm_lpage_info {
        int write_count;
};

struct kvm_arch_memory_slot {
        unsigned long *rmap[KVM_NR_PAGE_SIZES];
        struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
};

/*
 * We use as the mode the number of bits allocated in the LDR for the
 * logical processor ID.  It happens that these are all powers of two.
 * This makes it very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */
#define KVM_APIC_MODE_XAPIC_CLUSTER          4
#define KVM_APIC_MODE_XAPIC_FLAT             8
#define KVM_APIC_MODE_X2APIC                16

struct kvm_apic_map {
        struct rcu_head rcu;
        u8 mode;
        struct kvm_lapic *phys_map[256];
        /* first index is cluster id second is cpu id in a cluster */
        struct kvm_lapic *logical_map[16][16];
};
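/*
 * Illustrative lookup (see kvm_irq_delivery_to_apic_fast for the real code):
 * in xAPIC cluster mode the high nibble of an 8-bit logical ID selects the
 * cluster and the low nibble is a bitmask of CPUs inside it, i.e. roughly
 *	dst = map->logical_map[ldest >> 4][ffs(ldest & 0xf) - 1];
 */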

/* Hyper-V emulation context */
struct kvm_hv {
        u64 hv_guest_os_id;
        u64 hv_hypercall;
        u64 hv_tsc_page;

        /* Hyper-v based guest crash (NT kernel bugcheck) parameters */
        u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
        u64 hv_crash_ctl;
};

struct kvm_arch {
        unsigned int n_used_mmu_pages;
        unsigned int n_requested_mmu_pages;
        unsigned int n_max_mmu_pages;
        unsigned int indirect_shadow_pages;
        unsigned long mmu_valid_gen;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        /*
         * Hash table of struct kvm_mmu_page.
         */
        struct list_head active_mmu_pages;
        struct list_head zapped_obsolete_pages;

        struct list_head assigned_dev_head;
        struct iommu_domain *iommu_domain;
        bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
        atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
        atomic_t assigned_device_count;
        struct kvm_pic *vpic;
        struct kvm_ioapic *vioapic;
        struct kvm_pit *vpit;
        atomic_t vapics_in_nmi_mode;
        struct mutex apic_map_lock;
        struct kvm_apic_map *apic_map;

        unsigned int tss_addr;
        bool apic_access_page_done;

        gpa_t wall_clock;

        bool ept_identity_pagetable_done;
        gpa_t ept_identity_map_addr;

        unsigned long irq_sources_bitmap;
        s64 kvmclock_offset;
        raw_spinlock_t tsc_write_lock;
        u64 last_tsc_nsec;
        u64 last_tsc_write;
        u32 last_tsc_khz;
        u64 cur_tsc_nsec;
        u64 cur_tsc_write;
        u64 cur_tsc_offset;
        u64 cur_tsc_generation;
        int nr_vcpus_matched_tsc;

        spinlock_t pvclock_gtod_sync_lock;
        bool use_master_clock;
        u64 master_kernel_ns;
        cycle_t master_cycle_now;
        struct delayed_work kvmclock_update_work;
        struct delayed_work kvmclock_sync_work;

        struct kvm_xen_hvm_config xen_hvm_config;

        /* reads protected by irq_srcu, writes by irq_lock */
        struct hlist_head mask_notifier_list;

        struct kvm_hv hyperv;

#ifdef CONFIG_KVM_MMU_AUDIT
        int audit_point;
#endif

        bool boot_vcpu_runs_old_kvmclock;
        u32 bsp_vcpu_id;

        u64 disabled_quirks;

        bool irqchip_split;
        u8 nr_reserved_ioapic_pins;
};

struct kvm_vm_stat {
        u32 mmu_shadow_zapped;
        u32 mmu_pte_write;
        u32 mmu_pte_updated;
        u32 mmu_pde_zapped;
        u32 mmu_flooded;
        u32 mmu_recycled;
        u32 mmu_cache_miss;
        u32 mmu_unsync;
        u32 remote_tlb_flush;
        u32 lpages;
};

struct kvm_vcpu_stat {
        u32 pf_fixed;
        u32 pf_guest;
        u32 tlb_flush;
        u32 invlpg;

        u32 exits;
        u32 io_exits;
        u32 mmio_exits;
        u32 signal_exits;
        u32 irq_window_exits;
        u32 nmi_window_exits;
        u32 halt_exits;
        u32 halt_successful_poll;
        u32 halt_attempted_poll;
        u32 halt_wakeup;
        u32 request_irq_exits;
        u32 irq_exits;
        u32 host_state_reload;
        u32 efer_reload;
        u32 fpu_reload;
        u32 insn_emulation;
        u32 insn_emulation_fail;
        u32 hypercalls;
        u32 irq_injections;
        u32 nmi_injections;
};

struct x86_instruction_info;

struct msr_data {
        bool host_initiated;
        u32 index;
        u64 data;
};

struct kvm_lapic_irq {
        u32 vector;
        u16 delivery_mode;
        u16 dest_mode;
        bool level;
        u16 trig_mode;
        u32 shorthand;
        u32 dest_id;
        bool msi_redir_hint;
};

struct kvm_x86_ops {
        int (*cpu_has_kvm_support)(void);          /* __init */
        int (*disabled_by_bios)(void);             /* __init */
        int (*hardware_enable)(void);
        void (*hardware_disable)(void);
        void (*check_processor_compatibility)(void *rtn);
        int (*hardware_setup)(void);               /* __init */
        void (*hardware_unsetup)(void);            /* __exit */
        bool (*cpu_has_accelerated_tpr)(void);
        bool (*cpu_has_high_real_mode_segbase)(void);
        void (*cpuid_update)(struct kvm_vcpu *vcpu);

        /* Create, but do not attach this VCPU */
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

        void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);

        void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
        int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
        int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
        void (*get_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        int (*get_cpl)(struct kvm_vcpu *vcpu);
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
        void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
        void (*decache_cr3)(struct kvm_vcpu *vcpu);
        void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
        int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        u64 (*get_dr6)(struct kvm_vcpu *vcpu);
        void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
        void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
        void (*fpu_activate)(struct kvm_vcpu *vcpu);
        void (*fpu_deactivate)(struct kvm_vcpu *vcpu);

        void (*tlb_flush)(struct kvm_vcpu *vcpu);

        void (*run)(struct kvm_vcpu *vcpu);
        int (*handle_exit)(struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
        void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
        u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
        void (*patch_hypercall)(struct kvm_vcpu *vcpu,
                                unsigned char *hypercall_addr);
        void (*set_irq)(struct kvm_vcpu *vcpu);
        void (*set_nmi)(struct kvm_vcpu *vcpu);
        void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code,
                                bool reinject);
        void (*cancel_injection)(struct kvm_vcpu *vcpu);
        int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
        int (*nmi_allowed)(struct kvm_vcpu *vcpu);
        bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
        void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
        void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
        void (*enable_irq_window)(struct kvm_vcpu *vcpu);
        void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
        int (*cpu_uses_apicv)(struct kvm_vcpu *vcpu);
        void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
        void (*hwapic_isr_update)(struct kvm *kvm, int isr);
        void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu);
        void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
        void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
        void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
        void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
        int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
        int (*get_tdp_level)(void);
        u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
        int (*get_lpage_level)(void);
        bool (*rdtscp_supported)(void);
        bool (*invpcid_supported)(void);
        void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);

        void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

        void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

        bool (*has_wbinvd_exit)(void);

        void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
        u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
        void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

        u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
        u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);

        void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

        int (*check_intercept)(struct kvm_vcpu *vcpu,
                               struct x86_instruction_info *info,
                               enum x86_intercept_stage stage);
        void (*handle_external_intr)(struct kvm_vcpu *vcpu);
        bool (*mpx_supported)(void);
        bool (*xsaves_supported)(void);

        int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);

        void (*sched_in)(struct kvm_vcpu *kvm, int cpu);

        /*
         * Arch-specific dirty logging hooks. These hooks are only supposed to
         * be valid if the specific arch has a hardware-accelerated dirty logging
         * mechanism. Currently only for PML on VMX.
         *
         *  - slot_enable_log_dirty:
         *	called when enabling log dirty mode for the slot.
         *  - slot_disable_log_dirty:
         *	called when disabling log dirty mode for the slot.
         *	also called when slot is created with log dirty disabled.
         *  - flush_log_dirty:
         *	called before reporting dirty_bitmap to userspace.
         *  - enable_log_dirty_pt_masked:
         *	called when reenabling log dirty for the GFNs in the mask after
         *	corresponding bits are cleared in slot->dirty_bitmap.
         */
        void (*slot_enable_log_dirty)(struct kvm *kvm,
                                      struct kvm_memory_slot *slot);
        void (*slot_disable_log_dirty)(struct kvm *kvm,
                                       struct kvm_memory_slot *slot);
        void (*flush_log_dirty)(struct kvm *kvm);
        void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
                                           struct kvm_memory_slot *slot,
                                           gfn_t offset, unsigned long mask);
        /* pmu operations of sub-arch */
        const struct kvm_pmu_ops *pmu_ops;

        /*
         * Architecture specific hooks for vCPU blocking due to the
         * HLT instruction.
         * Returns for .pre_block():
         *    - 0 means continue to block the vCPU.
         *    - 1 means we cannot block the vCPU since some event
         *        happened during this period, such as the 'ON' bit in the
         *        posted-interrupts descriptor being set.
         */
        int (*pre_block)(struct kvm_vcpu *vcpu);
        void (*post_block)(struct kvm_vcpu *vcpu);
        int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
                              uint32_t guest_irq, bool set);
};

struct kvm_arch_async_pf {
        u32 token;
        gfn_t gfn;
        unsigned long cr3;
        bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;

static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
                                           s64 adjustment)
{
        kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
}

static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
        kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
}

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                u64 dirty_mask, u64 nx_mask, u64 x_mask);

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                   const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot);
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
                            struct kvm_memory_slot *memslot);
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                   struct kvm_memory_slot *slot,
                                   gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                          const void *val, int bytes);

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* maximum supported tsc_khz for guests */
extern u32 kvm_max_guest_tsc_khz;

enum emulation_result {
        EMULATE_DONE,         /* no further processing */
        EMULATE_USER_EXIT,    /* kvm_run ready for userspace exit */
        EMULATE_FAIL,         /* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE          (1 << 0)
#define EMULTYPE_TRAP_UD            (1 << 1)
#define EMULTYPE_SKIP               (1 << 2)
#define EMULTYPE_RETRY              (1 << 3)
#define EMULTYPE_NO_REEXECUTE       (1 << 4)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
                            int emulation_type, void *insn, int insn_len);

static inline int emulate_instruction(struct kvm_vcpu *vcpu,
                        int emulation_type)
{
        return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}

void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);

struct x86_emulate_ctxt;

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
                    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                            gfn_t gfn, void *data, int offset, int len,
                            u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
                                       int irq_source_id, int level)
{
        /* Logical OR for level trig interrupt */
        if (level)
                __set_bit(irq_source_id, irq_state);
        else
                __clear_bit(irq_source_id, irq_state);

        return !!(*irq_state);
}

int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes);
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                           struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
                              struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
                               struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
                               struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
                                struct x86_exception *exception);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
                       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                                  struct x86_exception *exception)
{
        return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

        return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
        u16 ldt;
        asm("sldt %0" : "=g"(ldt));
        return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
        asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
        u64 value;

        rdmsrl(msr, value);
        return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
        return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
        kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

static inline u64 get_canonical(u64 la)
{
        return ((int64_t)la << 16) >> 16;
}

static inline bool is_noncanonical_address(u64 la)
{
#ifdef CONFIG_X86_64
        return get_canonical(la) != la;
#else
        return false;
#endif
}
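/*
 * Worked example (48-bit virtual addresses): 0x0000800000000000 has bit 47
 * set but bits 63:48 clear, so get_canonical() sign-extends it to
 * 0xffff800000000000 != la, and is_noncanonical_address() returns true.
 */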

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE                                                  \
        (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum {
        TASK_SWITCH_CALL = 0,
        TASK_SWITCH_IRET = 1,
        TASK_SWITCH_JMP = 2,
        TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK             (1 << 0)
#define HF_HIF_MASK             (1 << 1)
#define HF_VINTR_MASK           (1 << 2)
#define HF_NMI_MASK             (1 << 3)
#define HF_IRET_MASK            (1 << 4)
#define HF_GUEST_MASK           (1 << 5) /* VCPU is in guest-mode */
#define HF_SMM_MASK             (1 << 6)
#define HF_SMM_INSIDE_NMI_MASK  (1 << 7)

#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
#define KVM_ADDRESS_SPACE_NUM 2

#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_spurious_fault(void);

#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)      \
        "666: " insn "\n\t" \
        "668: \n\t"                           \
        ".pushsection .fixup, \"ax\" \n" \
        "667: \n\t" \
        cleanup_insn "\n\t"                   \
        "cmpb $0, kvm_rebooting \n\t"         \
        "jne 668b \n\t"                       \
        __ASM_SIZE(push) " $666b \n\t"        \
        "call kvm_spurious_fault \n\t"        \
        ".popsection \n\t" \
        _ASM_EXTABLE(666b, 667b)

#define __kvm_handle_fault_on_reboot(insn)              \
        ____kvm_handle_fault_on_reboot(insn, "")
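/*
 * Typical use (illustrative, modeled on the VMX code): wrap a virtualization
 * instruction so that a fault taken after a reboot has disabled VMX is
 * skipped rather than oopsing, e.g.
 *	asm volatile(__kvm_handle_fault_on_reboot("vmxoff") : : : "cc");
 * If kvm_rebooting is not set, kvm_spurious_fault() is called instead.
 */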

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
                                           unsigned long address);

void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);

int kvm_is_in_guest(void);

int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
                             struct kvm_vcpu **dest_vcpu);

void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
                     struct kvm_lapic_irq *irq);

static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}

#endif /* _ASM_X86_KVM_HOST_H */