/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>

#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>

#define KVM_MAX_VCPUS 255
#define KVM_SOFT_MAX_VCPUS 160
#define KVM_USER_MEM_SLOTS 125
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_MMIO_SIZE 16

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2

#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

#define CR0_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
#define CR4_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

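/*
 * For illustration: these masks are the complement of every architecturally
 * defined bit, so "reserved" here means any bit KVM does not emulate.  A CR
 * write handler (e.g. kvm_set_cr4() in arch/x86/kvm/x86.c) can then validate
 * a guest value with a single test, roughly:
 *
 *	if (cr4 & CR4_RESERVED_BITS)
 *		return 1;	// caller injects #GP into the guest
 */
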
#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
#define KVM_NR_PAGE_SIZES	3
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

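/*
 * Worked example (illustrative): with 4 KiB base pages (PAGE_SHIFT == 12),
 * each paging level resolves 9 more guest-frame bits, so
 *
 *	KVM_HPAGE_SHIFT(1) = 12  ->  4 KiB pages
 *	KVM_HPAGE_SHIFT(2) = 21  ->  2 MiB pages, 512 base pages each
 *	KVM_HPAGE_SHIFT(3) = 30  ->  1 GiB pages, 262144 base pages each
 *
 * and gfn_to_index(gfn, base_gfn, 2) yields the 2 MiB slot of @gfn relative
 * to the start of a memslot beginning at @base_gfn.
 */
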
#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

struct kvm_vcpu;
struct kvm;
struct kvm_async_pf;

enum kvm_reg {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS
};

enum kvm_reg_ex {
	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR3,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_SEGMENTS,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS	4

#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_FIXED_1	0xffff0ff0
#define DR6_VOLATILE	0x0000e00f

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff23ff

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC	0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes by the guest by comparing
 * this bit with PV-EOI in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING	1

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

/*
 * kvm_mmu_page_role, below, packs the properties that identify a shadow
 * page into a single word:
 *
 *   bits 0:3   - level of the shadow page table (1-4)
 *   bit    4   - whether the guest uses 64-bit/PAE (8-byte) ptes
 *   bits 5:6   - page table quadrant for 2-level guests
 *   bit   13   - direct mapping of virtual to physical mapping at gfn
 *                used for real mode and two-dimensional paging
 *   bits 14:16 - common access permissions for all ptes in this shadow page
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned level:4;
		unsigned cr4_pae:1;
		unsigned quadrant:2;
		unsigned pad_for_nice_hex_output:6;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
	};
};

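/*
 * Example (illustrative): the role word and the gfn together form the hash
 * key for shadow pages, so a lookup in the style of kvm_mmu_get_page() in
 * arch/x86/kvm/mmu.c can do, roughly:
 *
 *	role = vcpu->arch.mmu.base_role;
 *	role.level = level;
 *	role.direct = direct;
 *	for_each_gfn_sp(vcpu->kvm, sp, gfn)
 *		if (sp->role.word == role.word)
 *			return sp;	// reuse the existing shadow page
 */
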
struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	bool unsync;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	unsigned long parent_ptes;	/* Reverse mapping for parent_pte */

	/* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen.  */
	unsigned long mmu_valid_gen;

	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page.  */
	int write_flooding_count;
};

struct kvm_pio_request {
	unsigned long count;
	int in;
	int port;
	int size;
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
			  bool prefault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
			    struct x86_exception *exception);
	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   u64 *spte, const void *pte);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;
	union kvm_mmu_page_role base_role;
	bool direct_map;

	/*
	 * Bitmap; bit set = permission fault
	 * Byte index: page fault error code [4:1]
	 * Bit index: pte permissions in ACC_* format
	 */
	u8 permissions[16];

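	/*
	 * Example (illustrative): the consumer of this bitmap, roughly as in
	 * permission_fault() in arch/x86/kvm/mmu.h, collapses a permission
	 * check into one shift and mask:
	 *
	 *	return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
	 *
	 * where pfec is the page fault error code and pte_access is the
	 * ACC_* summary of the pte being checked.
	 */
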
	u64 *pae_root;
	u64 *lm_root;
	u64 rsvd_bits_mask[2][4];
	u64 bad_mt_xwr;

	/*
	 * Bitmap: bit set = last pte in walk
	 * index[0:1]: level (zero-based)
	 * index[2]: pte.ps
	 */
	u8 last_pte_bitmap;

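	/*
	 * Example (illustrative): a lookup in the style of is_last_gpte() in
	 * arch/x86/kvm/mmu.c builds the 3-bit index from the walk level and
	 * the pte's page-size bit, then tests the bitmap:
	 *
	 *	index = (level - 1) |
	 *		((gpte & PT_PAGE_SIZE_MASK) >> (PT_PAGE_SIZE_SHIFT - 2));
	 *	return mmu->last_pte_bitmap & (1 << index);
	 */
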
	bool nx;

	u64 pdptrs[4]; /* pae */
};

enum pmc_type {
	KVM_PMC_GP = 0,
	KVM_PMC_FIXED,
};

struct kvm_pmc {
	enum pmc_type type;
	u8 idx;
	u64 counter;
	u64 eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;
};

struct kvm_pmu {
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
	u64 fixed_ctr_ctrl;
	u64 global_ctrl;
	u64 global_status;
	u64 global_ovf_ctrl;
	u64 counter_bitmask[2];
	u64 global_ctrl_mask;
	u64 reserved_bits;
	u8 version;
	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
	struct irq_work irq_work;
	u64 reprogram_pmi;
};

enum {
	KVM_DEBUGREG_BP_ENABLED = 1,
	KVM_DEBUGREG_WONT_EXIT = 2,
};

struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;    /* kernel irqchip context */
	unsigned long apic_attention;
	int32_t apic_arb_prio;
	int mp_state;
	u64 ia32_misc_enable_msr;
	bool tpr_access_reporting;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two level paging this still saves
	 * the paging mode of the l1 guest.  This context is always used to
	 * handle faults.
	 */
	struct kvm_mmu mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context will save all necessary information to walk page tables
	 * of an L2 guest.  This context is only initialized for page table
	 * walking and not for faulting since we never handle l2 page faults on
	 * the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	struct fpu guest_fpu;
	u64 xcr0;
	u64 guest_supported_xcr0;
	u32 guest_xstate_size;

	struct kvm_pio_request pio;
	void *pio_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool has_error_code;
		bool reinject;
		u8 nr;
		u32 error_code;
	} exception;

	struct kvm_queued_interrupt {
		bool pending;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;
	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	struct gfn_to_hva_cache pv_time;
	bool pv_time_enabled;
	/* set guest stopped flag in pvclock flags field */
	bool pvclock_set_guest_stopped_request;

	struct {
		u64 msr_val;
		u64 last_steal;
		u64 accum_steal;
		struct gfn_to_hva_cache stime;
		struct kvm_steal_time steal;
	} st;

	u64 last_guest_tsc;
	u64 last_host_tsc;
	u64 tsc_offset_adjustment;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
	u64 this_tsc_generation;
	bool tsc_catchup;
	bool tsc_always_catchup;
	s8 virtual_tsc_shift;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;
	s64 ia32_tsc_adjust_msr;

	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
	unsigned nmi_pending; /* NMI queued after currently running handler */
	bool nmi_injected;    /* Trying to inject an NMI this entry */

	struct mtrr_state_type mtrr_state;
	u32 pat;

	unsigned switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];
	unsigned long guest_debug_dr7;

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 *mce_banks;

	/* Cache MMIO info */
	u64 mmio_gva;
	unsigned access;
	gfn_t mmio_gfn;

	struct kvm_pmu pmu;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;

	/* fields used by HYPER-V emulation */
	u64 hv_vapic;

	cpumask_var_t wbinvd_dirty_mask;

	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
		struct gfn_to_hva_cache data;
		u64 msr_val;
		u32 id;
		bool send_user_only;
	} apf;

	/* OSVW MSRs (AMD only) */
	struct {
		u64 length;
		u64 status;
	} osvw;

	struct {
		u64 msr_val;
		struct gfn_to_hva_cache data;
	} pv_eoi;

	/*
	 * Indicates whether the access faulted on its page table in the
	 * guest; set when we fix a page fault and used to detect
	 * unhandleable instructions.
	 */
	bool write_fault_to_shadow_pgtable;

	/* set at EPT violation at this point */
	unsigned long exit_qualification;

	/* pv related host specific info */
	struct {
		bool pv_unhalted;
	} pv;
};

struct kvm_lpage_info {
	int write_count;
};

struct kvm_arch_memory_slot {
	unsigned long *rmap[KVM_NR_PAGE_SIZES];
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
};

struct kvm_apic_map {
	struct rcu_head rcu;
	u8 ldr_bits;
	/* fields below are used to decode ldr values in different modes */
	u32 cid_shift, cid_mask, lid_mask;
	struct kvm_lapic *phys_map[256];
	/* first index is cluster id, second is cpu id in a cluster */
	struct kvm_lapic *logical_map[16][16];
};

struct kvm_arch {
	unsigned int n_used_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	unsigned long mmu_valid_gen;
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head zapped_obsolete_pages;

	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
	atomic_t noncoherent_dma_count;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	int vapics_in_nmi_mode;
	struct mutex apic_map_lock;
	struct kvm_apic_map *apic_map;

	unsigned int tss_addr;
	struct page *apic_access_page;

	gpa_t wall_clock;

	struct page *ept_identity_pagetable;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 last_tsc_khz;
	u64 cur_tsc_nsec;
	u64 cur_tsc_write;
	u64 cur_tsc_offset;
	u64 cur_tsc_generation;
	int nr_vcpus_matched_tsc;

	spinlock_t pvclock_gtod_sync_lock;
	bool use_master_clock;
	u64 master_kernel_ns;
	cycle_t master_cycle_now;
	struct delayed_work kvmclock_update_work;
	struct delayed_work kvmclock_sync_work;

	struct kvm_xen_hvm_config xen_hvm_config;

	/* fields used by HYPER-V emulation */
	u64 hv_guest_os_id;
	u64 hv_hypercall;
	u64 hv_tsc_page;

#ifdef CONFIG_KVM_MMU_AUDIT
	int audit_point;
#endif
};

struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 mmu_pte_write;
	u32 mmu_pte_updated;
	u32 mmu_pde_zapped;
	u32 mmu_flooded;
	u32 mmu_recycled;
	u32 mmu_cache_miss;
	u32 mmu_unsync;
	u32 remote_tlb_flush;
	u32 lpages;
};

struct kvm_vcpu_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 nmi_window_exits;
	u32 halt_exits;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 host_state_reload;
	u32 efer_reload;
	u32 fpu_reload;
	u32 insn_emulation;
	u32 insn_emulation_fail;
	u32 hypercalls;
	u32 irq_injections;
	u32 nmi_injections;
};

struct x86_instruction_info;

struct msr_data {
	bool host_initiated;
	u32 index;
	u64 data;
};

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	int (*hardware_enable)(void *dummy);
	void (*hardware_disable)(void *dummy);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */
	bool (*cpu_has_accelerated_tpr)(void);
	void (*cpuid_update)(struct kvm_vcpu *vcpu);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
	void (*decache_cr3)(struct kvm_vcpu *vcpu);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	u64 (*get_dr6)(struct kvm_vcpu *vcpu);
	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
	void (*fpu_activate)(struct kvm_vcpu *vcpu);
	void (*fpu_deactivate)(struct kvm_vcpu *vcpu);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);

	void (*run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	int (*vm_has_apicv)(struct kvm *kvm);
	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
	void (*hwapic_isr_update)(struct kvm *kvm, int isr);
	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
	void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*get_tdp_level)(void);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);
	bool (*invpcid_supported)(void);
	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);

	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

	bool (*has_wbinvd_exit)(void);

	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
	u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);

	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage);
	void (*handle_external_intr)(struct kvm_vcpu *vcpu);
	bool (*mpx_supported)(void);

	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
};

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;

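/*
 * For illustration: kvm_x86_ops is filled in by the vendor module.  A sketch
 * of how vmx.c (or, analogously, svm.c) wires itself up in this era:
 *
 *	static struct kvm_x86_ops vmx_x86_ops = {
 *		.cpu_has_kvm_support = cpu_has_kvm_support,
 *		.hardware_enable = hardware_enable,
 *		.run = vmx_vcpu_run,
 *		...
 *	};
 *
 *	kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), ...);
 */
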
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
					   s64 adjustment)
{
	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
}

static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
}

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask);

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* minimum supported tsc_khz for guests */
extern u32 kvm_min_guest_tsc_khz;
/* maximum supported tsc_khz for guests */
extern u32 kvm_max_guest_tsc_khz;

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_USER_EXIT,    /* kvm_run ready for userspace exit */
	EMULATE_FAIL,         /* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE	    (1 << 0)
#define EMULTYPE_TRAP_UD	    (1 << 1)
#define EMULTYPE_SKIP		    (1 << 2)
#define EMULTYPE_RETRY		    (1 << 3)
#define EMULTYPE_NO_REEXECUTE	    (1 << 4)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
			    int emulation_type, void *insn, int insn_len);

static inline int emulate_instruction(struct kvm_vcpu *vcpu,
			int emulation_type)
{
	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}

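/*
 * Usage note (illustrative, not from the original header): callers combine
 * the EMULTYPE flags above.  Roughly: EMULTYPE_NO_DECODE re-enters the
 * emulator with an already-decoded context (e.g. when completing userspace
 * I/O); EMULTYPE_TRAP_UD marks emulation started from a #UD intercept, so a
 * decode failure is reported back rather than retried; EMULTYPE_SKIP decodes
 * only, to step over an instruction; EMULTYPE_RETRY lets the MMU unprotect
 * the faulting page and re-execute instead of emulating.
 */
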
void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);

struct x86_emulate_ctxt;

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t gfn, void *data, int offset, int len,
			    u32 access);
void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
				       int irq_source_id, int level)
{
	/* Logical OR for level trig interrupt */
	if (level)
		__set_bit(irq_source_id, irq_state);
	else
		__clear_bit(irq_source_id, irq_state);

	return !!(*irq_state);
}

int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int fx_init(struct kvm_vcpu *vcpu);

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes);
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
{
	return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE							\
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK		(1 << 0)
#define HF_HIF_MASK		(1 << 1)
#define HF_VINTR_MASK		(1 << 2)
#define HF_NMI_MASK		(1 << 3)
#define HF_IRET_MASK		(1 << 4)
#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_spurious_fault(void);

#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)	\
	"666: " insn "\n\t" \
	"668: \n\t" \
	".pushsection .fixup, \"ax\" \n" \
	"667: \n\t" \
	cleanup_insn "\n\t" \
	"cmpb $0, kvm_rebooting \n\t" \
	"jne 668b \n\t" \
	__ASM_SIZE(push) " $666b \n\t" \
	"call kvm_spurious_fault \n\t" \
	".popsection \n\t" \
	_ASM_EXTABLE(666b, 667b)

#define __kvm_handle_fault_on_reboot(insn)		\
	____kvm_handle_fault_on_reboot(insn, "")

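/*
 * Usage sketch (illustrative): the VMX code wraps each virtualization
 * instruction with this macro so that a fault during shutdown is swallowed
 * rather than oopsing, along the lines of:
 *
 *	#define __ex(x) __kvm_handle_fault_on_reboot(x)
 *	...
 *	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) ...);
 *
 * If kvm_rebooting is set, the fixup jumps past the faulting instruction;
 * otherwise it calls kvm_spurious_fault() to report a genuine problem.
 */
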
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu);

void kvm_define_shared_msr(unsigned index, u32 msr);
void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);

int kvm_is_in_guest(void);

void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc);
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
void kvm_deliver_pmi(struct kvm_vcpu *vcpu);

#endif /* _ASM_X86_KVM_HOST_H */