/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>

#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>

#define KVM_MAX_VCPUS 255
#define KVM_SOFT_MAX_VCPUS 160
#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2

#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

#define CR0_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
#define CR3_PCID_INVD		 BIT_64(63)
#define CR4_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
#define KVM_NR_PAGE_SIZES	3
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

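/*
 * Worked example (editor's illustration, not part of the original header):
 * for a hypothetical slot with base_gfn 0x1000, the index of gfn 0x1234 in
 * the slot's 2MB-level (level 2) metadata array would be
 *
 *	gfn_to_index(0x1234, 0x1000, 2) == (0x1234 >> 9) - (0x1000 >> 9) == 1
 *
 * since KVM_HPAGE_GFN_SHIFT(2) == 9, i.e. 512 base pages per 2MB page.
 */
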
#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS
};

enum kvm_reg_ex {
	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR3,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_SEGMENTS,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS	4

#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_RTM		(1 << 16)
#define DR6_FIXED_1	0xfffe0ff0
#define DR6_INIT	0xffff0ff0
#define DR6_VOLATILE	0x0001e00f

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff2bff

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC	0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect guest changes to PV-EOI by comparing this
 * bit with the PV-EOI value in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING	1

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

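/*
 * Consumption sketch (editor's illustration, not part of the original
 * header): the cache is topped up before a fault is handled, so the fault
 * path can simply pop a preallocated object and never has to allocate
 * while holding the mmu lock.
 */
static inline void *kvm_mmu_memory_cache_pop_sketch(struct kvm_mmu_memory_cache *mc)
{
	if (!mc->nobjs)
		return NULL;			/* cache must have been pre-filled */
	return mc->objects[--mc->nobjs];	/* pop the last preallocated object */
}
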
/*
 * kvm_mmu_page_role, below, packs the properties that identify a shadow
 * page into a single comparable word:
 *
 *   level    - the page table level of this shadow page
 *   cr4_pae  - guest ptes are 8 bytes wide (PAE or long mode)
 *   quadrant - page table quadrant for 2-level guests
 *   direct   - direct mapping of virtual to physical mapping at gfn,
 *              used for real mode and two-dimensional paging
 *   access   - common access permissions for all ptes in this shadow page
 *
 * plus invalid, nxe, cr0_wp and smep_andnot_wp flags that further qualify
 * the paging context the page was created under.
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned level:4;
		unsigned cr4_pae:1;
		unsigned quadrant:2;
		unsigned pad_for_nice_hex_output:6;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
	};
};

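/*
 * Equality sketch (editor's illustration, not part of the original header):
 * because the role is a union over "word", all of these properties can be
 * hashed and compared in one go when looking up a shadow page.
 */
static inline bool kvm_mmu_role_equal_sketch(union kvm_mmu_page_role a,
					     union kvm_mmu_page_role b)
{
	return a.word == b.word;	/* compares every bitfield at once */
}
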
struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	bool unsync;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	unsigned long parent_ptes;	/* Reverse mapping for parent_pte */

	/* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen. */
	unsigned long mmu_valid_gen;

	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	int write_flooding_count;
};

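/*
 * Hashing sketch (editor's illustration, not part of the original header):
 * shadow pages are chained into kvm->arch.mmu_page_hash by gfn; under the
 * definitions above, a minimal bucket function could look like this.
 */
static inline unsigned kvm_page_table_hashfn_sketch(gfn_t gfn)
{
	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);	/* bucket index */
}
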
struct kvm_pio_request {
	unsigned long count;
	int in;
	int port;
	int size;
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level PAE, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
			  bool prefault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
			    struct x86_exception *exception);
	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			       struct x86_exception *exception);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   u64 *spte, const void *pte);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;
	union kvm_mmu_page_role base_role;
	bool direct_map;

	/*
	 * Bitmap; bit set = permission fault
	 * Byte index: page fault error code [4:1]
	 * Bit index: pte permissions in ACC_* format
	 */
	u8 permissions[16];

	u64 *pae_root;
	u64 *lm_root;
	u64 rsvd_bits_mask[2][4];
	u64 bad_mt_xwr;

	/*
	 * Bitmap: bit set = last pte in walk
	 * index[0:1]: level (zero-based)
	 * index[2]: pte.ps
	 */
	u8 last_pte_bitmap;

	bool nx;

	u64 pdptrs[4]; /* pae */
};

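/*
 * Permission-check sketch (editor's illustration, not part of the original
 * header): with the permissions[] encoding described above, a fault with
 * page fault error code @pfec against a pte whose ACC_* permissions are
 * @pte_access is a permission fault iff the corresponding bit is set.
 */
static inline bool permission_fault_sketch(struct kvm_mmu *mmu,
					   unsigned pte_access, unsigned pfec)
{
	/* byte index = pfec[4:1], bit index = pte_access */
	return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
}
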
enum pmc_type {
	KVM_PMC_GP = 0,
	KVM_PMC_FIXED,
};

struct kvm_pmc {
	enum pmc_type type;
	u8 idx;
	u64 counter;
	u64 eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;
};

struct kvm_pmu {
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
	u64 fixed_ctr_ctrl;
	u64 global_ctrl;
	u64 global_status;
	u64 global_ovf_ctrl;
	u64 counter_bitmask[2];
	u64 global_ctrl_mask;
	u64 reserved_bits;
	u8 version;
	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
	struct irq_work irq_work;
	u64 reprogram_pmi;
};

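/*
 * Width-masking sketch (editor's illustration, not part of the original
 * header): counter_bitmask[] is indexed by pmc type, so truncating a raw
 * counter value to the width the guest can observe is a single mask.
 */
static inline u64 pmc_read_sketch(struct kvm_pmu *pmu, struct kvm_pmc *pmc)
{
	return pmc->counter & pmu->counter_bitmask[pmc->type];
}
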
enum {
	KVM_DEBUGREG_BP_ENABLED = 1,
	KVM_DEBUGREG_WONT_EXIT = 2,
};

struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;    /* kernel irqchip context */
	unsigned long apic_attention;
	int32_t apic_arb_prio;
	int mp_state;
	u64 ia32_misc_enable_msr;
	bool tpr_access_reporting;
	u64 ia32_xss;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two-level paging, this still
	 * saves the paging mode of the L1 guest.  This context is always
	 * used to handle faults.
	 */
	struct kvm_mmu mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context saves all the information necessary to walk the page
	 * tables of an L2 guest.  It is only initialized for page table
	 * walking and not for faulting, since we never handle L2 page
	 * faults on the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	struct fpu guest_fpu;
	u64 xcr0;
	u64 guest_supported_xcr0;
	u32 guest_xstate_size;

	struct kvm_pio_request pio;
	void *pio_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool has_error_code;
		bool reinject;
		u8 nr;
		u32 error_code;
	} exception;

	struct kvm_queued_interrupt {
		bool pending;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	/* emulate context */
	struct x86_emulate_ctxt emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;
	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	struct gfn_to_hva_cache pv_time;
	bool pv_time_enabled;
	/* set guest stopped flag in pvclock flags field */
	bool pvclock_set_guest_stopped_request;

	struct {
		u64 msr_val;
		u64 last_steal;
		u64 accum_steal;
		struct gfn_to_hva_cache stime;
		struct kvm_steal_time steal;
	} st;

	u64 last_guest_tsc;
	u64 last_host_tsc;
	u64 tsc_offset_adjustment;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
	u64 this_tsc_generation;
	bool tsc_catchup;
	bool tsc_always_catchup;
	s8 virtual_tsc_shift;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;
	s64 ia32_tsc_adjust_msr;

	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
	unsigned nmi_pending; /* NMI queued after currently running handler */
	bool nmi_injected;    /* Trying to inject an NMI this entry */

	struct mtrr_state_type mtrr_state;
	u64 pat;

	unsigned switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];
	unsigned long guest_debug_dr7;

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 *mce_banks;

	/* Cache MMIO info */
	u64 mmio_gva;
	unsigned access;
	gfn_t mmio_gfn;
	u64 mmio_gen;

	struct kvm_pmu pmu;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;

	/* fields used by HYPER-V emulation */
	u64 hv_vapic;

	cpumask_var_t wbinvd_dirty_mask;

	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
		struct gfn_to_hva_cache data;
		u64 msr_val;
		u32 id;
		bool send_user_only;
	} apf;

	/* OSVW MSRs (AMD only) */
	struct {
		u64 length;
		u64 status;
	} osvw;

	struct {
		u64 msr_val;
		struct gfn_to_hva_cache data;
	} pv_eoi;

	/*
	 * Indicates whether the access faulted on its page table in the
	 * guest; set when KVM fixes a page fault, and used to detect
	 * unhandleable instructions.
	 */
	bool write_fault_to_shadow_pgtable;

	/* set at EPT violation at this point */
	unsigned long exit_qualification;

	/* pv related host specific info */
	struct {
		bool pv_unhalted;
	} pv;
};

struct kvm_lpage_info {
	int write_count;
};

struct kvm_arch_memory_slot {
	unsigned long *rmap[KVM_NR_PAGE_SIZES];
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
};

struct kvm_apic_map {
	struct rcu_head rcu;
	u8 ldr_bits;
	/* fields below are used to decode ldr values in different modes */
	u32 cid_shift, cid_mask, lid_mask, broadcast;
	struct kvm_lapic *phys_map[256];
	/* first index is cluster id, second is cpu id within the cluster */
	struct kvm_lapic *logical_map[16][16];
};

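/*
 * Decode sketch (editor's illustration, not part of the original header):
 * splitting a logical destination register value into the cluster index
 * used as the first subscript of logical_map[][], under the fields above.
 */
static inline u16 apic_cluster_id_sketch(struct kvm_apic_map *map, u32 ldr)
{
	ldr >>= 32 - map->ldr_bits;			/* keep only the LDR bits in use */
	return (ldr >> map->cid_shift) & map->cid_mask;	/* first index */
}
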
struct kvm_arch {
	unsigned int n_used_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	unsigned long mmu_valid_gen;
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head zapped_obsolete_pages;

	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
	atomic_t noncoherent_dma_count;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	int vapics_in_nmi_mode;
	struct mutex apic_map_lock;
	struct kvm_apic_map *apic_map;

	unsigned int tss_addr;
	bool apic_access_page_done;

	gpa_t wall_clock;

	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 last_tsc_khz;
	u64 cur_tsc_nsec;
	u64 cur_tsc_write;
	u64 cur_tsc_offset;
	u64 cur_tsc_generation;
	int nr_vcpus_matched_tsc;

	spinlock_t pvclock_gtod_sync_lock;
	bool use_master_clock;
	u64 master_kernel_ns;
	cycle_t master_cycle_now;
	struct delayed_work kvmclock_update_work;
	struct delayed_work kvmclock_sync_work;

	struct kvm_xen_hvm_config xen_hvm_config;

	/* reads protected by irq_srcu, writes by irq_lock */
	struct hlist_head mask_notifier_list;

	/* fields used by HYPER-V emulation */
	u64 hv_guest_os_id;
	u64 hv_hypercall;
	u64 hv_tsc_page;

#ifdef CONFIG_KVM_MMU_AUDIT
	int audit_point;
#endif

	bool boot_vcpu_runs_old_kvmclock;
};

struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 mmu_pte_write;
	u32 mmu_pte_updated;
	u32 mmu_pde_zapped;
	u32 mmu_flooded;
	u32 mmu_recycled;
	u32 mmu_cache_miss;
	u32 mmu_unsync;
	u32 remote_tlb_flush;
	u32 lpages;
};

struct kvm_vcpu_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 nmi_window_exits;
	u32 halt_exits;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 host_state_reload;
	u32 efer_reload;
	u32 fpu_reload;
	u32 insn_emulation;
	u32 insn_emulation_fail;
	u32 hypercalls;
	u32 irq_injections;
	u32 nmi_injections;
};

struct x86_instruction_info;

struct msr_data {
	bool host_initiated;
	u32 index;
	u64 data;
};

struct kvm_lapic_irq {
	u32 vector;
	u32 delivery_mode;
	u32 dest_mode;
	u32 level;
	u32 trig_mode;
	u32 shorthand;
	u32 dest_id;
};

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */
	bool (*cpu_has_accelerated_tpr)(void);
	void (*cpuid_update)(struct kvm_vcpu *vcpu);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
	void (*decache_cr3)(struct kvm_vcpu *vcpu);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	u64 (*get_dr6)(struct kvm_vcpu *vcpu);
	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
	void (*fpu_deactivate)(struct kvm_vcpu *vcpu);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);

	void (*run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	int (*vm_has_apicv)(struct kvm *kvm);
	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
	void (*hwapic_isr_update)(struct kvm *kvm, int isr);
	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
	void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*get_tdp_level)(void);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);
	bool (*invpcid_supported)(void);
	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);

	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

	bool (*has_wbinvd_exit)(void);

	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
	u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);

	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage);
	void (*handle_external_intr)(struct kvm_vcpu *vcpu);
	bool (*mpx_supported)(void);
	bool (*xsaves_supported)(void);

	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);

	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);

	/*
	 * Arch-specific dirty logging hooks. These hooks are only supposed to
	 * be valid if the specific arch has hardware-accelerated dirty logging
	 * mechanism. Currently only for PML on VMX.
	 *
	 *  - slot_enable_log_dirty:
	 *	called when enabling log dirty mode for the slot.
	 *  - slot_disable_log_dirty:
	 *	called when disabling log dirty mode for the slot.
	 *	also called when slot is created with log dirty disabled.
	 *  - flush_log_dirty:
	 *	called before reporting dirty_bitmap to userspace.
	 *  - enable_log_dirty_pt_masked:
	 *	called when reenabling log dirty for the GFNs in the mask after
	 *	corresponding bits are cleared in slot->dirty_bitmap.
	 */
	void (*slot_enable_log_dirty)(struct kvm *kvm,
				      struct kvm_memory_slot *slot);
	void (*slot_disable_log_dirty)(struct kvm *kvm,
				       struct kvm_memory_slot *slot);
	void (*flush_log_dirty)(struct kvm *kvm);
	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
					   struct kvm_memory_slot *slot,
					   gfn_t offset, unsigned long mask);
};

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;

static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
					   s64 adjustment)
{
	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
}

static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
}

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask);

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
					struct kvm_memory_slot *memslot);
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
			    struct kvm_memory_slot *memslot);
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				   struct kvm_memory_slot *slot,
				   gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* minimum supported tsc_khz for guests */
extern u32  kvm_min_guest_tsc_khz;
/* maximum supported tsc_khz for guests */
extern u32  kvm_max_guest_tsc_khz;

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_USER_EXIT,    /* kvm_run ready for userspace exit */
	EMULATE_FAIL,         /* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE	    (1 << 0)
#define EMULTYPE_TRAP_UD	    (1 << 1)
#define EMULTYPE_SKIP		    (1 << 2)
#define EMULTYPE_RETRY		    (1 << 3)
#define EMULTYPE_NO_REEXECUTE	    (1 << 4)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
			    int emulation_type, void *insn, int insn_len);

static inline int emulate_instruction(struct kvm_vcpu *vcpu,
				      int emulation_type)
{
	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}

void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);

struct x86_emulate_ctxt;

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t gfn, void *data, int offset, int len,
			    u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
				       int irq_source_id, int level)
{
	/* Logical OR for level trig interrupt */
	if (level)
		__set_bit(irq_source_id, irq_state);
	else
		__clear_bit(irq_source_id, irq_state);

	return !!(*irq_state);
}

int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int fx_init(struct kvm_vcpu *vcpu);

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes);
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
				  struct x86_exception *exception)
{
	return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

static inline u64 get_canonical(u64 la)
{
	return ((int64_t)la << 16) >> 16;
}

static inline bool is_noncanonical_address(u64 la)
{
#ifdef CONFIG_X86_64
	return get_canonical(la) != la;
#else
	return false;
#endif
}

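/*
 * Worked example (editor's illustration, not part of the original header):
 * on x86_64 only bits 0-47 of a linear address are significant, and bits
 * 63:48 must equal bit 47.  For la == 0x0000800000000000, bit 47 is set
 * but the upper bits are clear, so get_canonical() sign-extends it to
 * 0xffff800000000000 != la and is_noncanonical_address() returns true.
 */
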
#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE							\
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK		(1 << 0)
#define HF_HIF_MASK		(1 << 1)
#define HF_VINTR_MASK		(1 << 2)
#define HF_NMI_MASK		(1 << 3)
#define HF_IRET_MASK		(1 << 4)
#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_spurious_fault(void);

#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)	\
	"666: " insn "\n\t" \
	"668: \n\t"                           \
	".pushsection .fixup, \"ax\" \n" \
	"667: \n\t" \
	cleanup_insn "\n\t"		      \
	"cmpb $0, kvm_rebooting \n\t"	      \
	"jne 668b \n\t"			      \
	__ASM_SIZE(push) " $666b \n\t"	      \
	"call kvm_spurious_fault \n\t"	      \
	".popsection \n\t" \
	_ASM_EXTABLE(666b, 667b)

#define __kvm_handle_fault_on_reboot(insn)		\
	____kvm_handle_fault_on_reboot(insn, "")

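/*
 * Usage sketch (editor's illustration, not part of the original header):
 * a VMX instruction wrapped so that a fault caused by virtualization being
 * turned off at reboot is trapped and ignored rather than oopsing.  The
 * actual wrappers live in the vendor modules (vmx.c/svm.c).
 *
 *	asm volatile(__kvm_handle_fault_on_reboot("vmxoff")
 *		     : : : "cc", "memory");
 */
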
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
					   unsigned long address);

void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);

int kvm_is_in_guest(void);

void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc);
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
void kvm_deliver_pmi(struct kvm_vcpu *vcpu);

#endif /* _ASM_X86_KVM_HOST_H */