/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>

#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>

#define KVM_MAX_VCPUS 254
#define KVM_SOFT_MAX_VCPUS 64
#define KVM_MEMORY_SLOTS 32
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_MMIO_SIZE 16

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2

#define CR0_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS |	\
				  0xFFFFFF0000000000ULL)
#define CR4_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_RDWRGSFS \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
#define KVM_NR_PAGE_SIZES	3
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
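/*
 * With the x86 PAGE_SHIFT of 12 these work out to the familiar sizes,
 * one paging level (9 gfn bits) per step:
 *   KVM_HPAGE_SIZE(1) == 4 KiB, KVM_HPAGE_SIZE(2) == 2 MiB,
 *   KVM_HPAGE_SIZE(3) == 1 GiB.
 */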

#define DE_VECTOR 0
#define DB_VECTOR 1
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define MC_VECTOR 18

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

extern raw_spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_vcpu;
struct kvm;
struct kvm_async_pf;

enum kvm_reg {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS
};

enum kvm_reg_ex {
	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR3,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_CPL,
	VCPU_EXREG_SEGMENTS,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS	4

#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_FIXED_1	0xffff0ff0
#define DR6_VOLATILE	0x0000e00f

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff23ff

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
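
/*
 * A minimal sketch of how such a cache is consumed (the real helper,
 * mmu_memory_cache_alloc() in arch/x86/kvm/mmu.c, looks much like this):
 * objects are popped off the preallocated stack, so the fault path never
 * has to call into the page allocator.  The name below is illustrative,
 * not part of the original header.
 */
static inline void *mmu_memory_cache_alloc_sketch(struct kvm_mmu_memory_cache *mc)
{
	BUG_ON(!mc->nobjs);		/* caches are topped up before use */
	return mc->objects[--mc->nobjs];
}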

#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bit 4 - guest ptes are 8 bytes wide (PAE or long mode)
 *   bits 5:6 - page table quadrant for 2-level guests
 *   bit 13 - direct mapping of virtual to physical at gfn,
 *            used for real mode and two-dimensional paging
 *   bits 14:16 - common access permissions for all ptes in this shadow page
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned level:4;
		unsigned cr4_pae:1;
		unsigned quadrant:2;
		unsigned pad_for_nice_hex_output:6;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
	};
};
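
/*
 * Because the bitfields alias 'word', role equality is a single integer
 * compare; the shadow-page hash lookup in arch/x86/kvm/mmu.c relies on
 * exactly this.  A sketch (the helper name is illustrative):
 */
static inline bool kvm_mmu_page_role_equal(union kvm_mmu_page_role a,
					   union kvm_mmu_page_role b)
{
	return a.word == b.word;	/* compares all role bits at once */
}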

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	/*
	 * One bit set per slot which has memory
	 * in this shadow page.
	 */
	DECLARE_BITMAP(slot_bitmap, KVM_MEM_SLOTS_NUM);
	bool unsync;
	int root_count;		/* Currently serving as active root */
	unsigned int unsync_children;
	unsigned long parent_ptes;	/* Reverse mapping for parent_pte */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	int clear_spte_count;
#endif

	int write_flooding_count;

	struct rcu_head rcu;
};

struct kvm_pio_request {
	unsigned long count;
	int in;
	int port;
	int size;
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 32-bit PAE, and
 * 2-level 32-bit).  The kvm_mmu structure abstracts the details of the
 * current mmu mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
			  bool prefault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
			    struct x86_exception *exception);
	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   u64 *spte, const void *pte);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;
	union kvm_mmu_page_role base_role;
	bool direct_map;

	u64 *pae_root;
	u64 *lm_root;
	u64 rsvd_bits_mask[2][4];

	bool nx;

	u64 pdptrs[4]; /* pae */
};
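
/*
 * Callers translate through the function pointers rather than testing
 * the paging mode themselves; a simplified sketch of the pattern used
 * by kvm_mmu_gva_to_gpa_read() in arch/x86/kvm/x86.c:
 *
 *	gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva,
 *						    access, &exception);
 */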

struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions; see the sketch after
	 * this struct.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;	/* kernel irqchip context */
	int32_t apic_arb_prio;
	int mp_state;
	int sipi_vector;
	u64 ia32_misc_enable_msr;
	bool tpr_access_reporting;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two-level paging this still
	 * saves the paging mode of the L1 guest. This context is always
	 * used to handle faults.
	 */
	struct kvm_mmu mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context saves all the information needed to walk the page
	 * tables of an L2 guest. It is only initialized for page table
	 * walking and not for faulting, since we never handle L2 page
	 * faults on the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	struct fpu guest_fpu;
	u64 xcr0;

	struct kvm_pio_request pio;
	void *pio_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool has_error_code;
		bool reinject;
		u8 nr;
		u32 error_code;
	} exception;

	struct kvm_queued_interrupt {
		bool pending;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	/* emulate context */
	struct x86_emulate_ctxt emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	unsigned int time_offset;
	struct page *time_page;

	struct {
		u64 msr_val;
		u64 last_steal;
		u64 accum_steal;
		struct gfn_to_hva_cache stime;
		struct kvm_steal_time steal;
	} st;

	u64 last_guest_tsc;
	u64 last_kernel_ns;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 virtual_tsc_khz;
	bool tsc_catchup;
	u32 tsc_catchup_mult;
	s8 tsc_catchup_shift;

	atomic_t nmi_queued;	/* unprocessed asynchronous NMIs */
	unsigned nmi_pending;	/* NMI queued after currently running handler */
	bool nmi_injected;	/* Trying to inject an NMI this entry */

	struct mtrr_state_type mtrr_state;
	u32 pat;

	int switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 *mce_banks;

	/* Cache MMIO info */
	u64 mmio_gva;
	unsigned access;
	gfn_t mmio_gfn;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;

	/* fields used by HYPER-V emulation */
	u64 hv_vapic;

	cpumask_var_t wbinvd_dirty_mask;

	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
		struct gfn_to_hva_cache data;
		u64 msr_val;
		u32 id;
		bool send_user_only;
	} apf;
};
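
/*
 * The register-caching rule noted at the top of kvm_vcpu_arch, sketched:
 * readers consult regs_avail and let the vendor module fault the value
 * in on demand.  This mirrors kvm_register_read() in
 * arch/x86/kvm/kvm_cache_regs.h:
 *
 *	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
 *		kvm_x86_ops->cache_reg(vcpu, reg);
 *	return vcpu->arch.regs[reg];
 */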

struct kvm_arch {
	unsigned int n_used_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	int iommu_flags;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	int vapics_in_nmi_mode;

	unsigned int tss_addr;
	struct page *apic_access_page;

	gpa_t wall_clock;

	struct page *ept_identity_pagetable;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_offset;
	u64 last_tsc_write;

	struct kvm_xen_hvm_config xen_hvm_config;

	/* fields used by HYPER-V emulation */
	u64 hv_guest_os_id;
	u64 hv_hypercall;

	atomic_t reader_counter;

#ifdef CONFIG_KVM_MMU_AUDIT
	int audit_point;
#endif
};

struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 mmu_pte_write;
	u32 mmu_pte_updated;
	u32 mmu_pde_zapped;
	u32 mmu_flooded;
	u32 mmu_recycled;
	u32 mmu_cache_miss;
	u32 mmu_unsync;
	u32 remote_tlb_flush;
	u32 lpages;
};

struct kvm_vcpu_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 nmi_window_exits;
	u32 halt_exits;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 host_state_reload;
	u32 efer_reload;
	u32 fpu_reload;
	u32 insn_emulation;
	u32 insn_emulation_fail;
	u32 hypercalls;
	u32 irq_injections;
	u32 nmi_injections;
};

struct x86_instruction_info;

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	int (*hardware_enable)(void *dummy);
	void (*hardware_disable)(void *dummy);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */
	bool (*cpu_has_accelerated_tpr)(void);
	void (*cpuid_update)(struct kvm_vcpu *vcpu);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*set_guest_debug)(struct kvm_vcpu *vcpu,
				struct kvm_guest_debug *dbg);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
	void (*decache_cr3)(struct kvm_vcpu *vcpu);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
	void (*fpu_activate)(struct kvm_vcpu *vcpu);
	void (*fpu_deactivate)(struct kvm_vcpu *vcpu);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);

	void (*run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*get_tdp_level)(void);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);
	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);

	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

	bool (*has_wbinvd_exit)(void);

	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz);
	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu);

	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage);
};
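
/*
 * Each vendor module fills in one of these tables and registers it via
 * kvm_init(); a hedged sketch of the VMX side (vmx.c), with most fields
 * elided:
 *
 *	static struct kvm_x86_ops vmx_x86_ops = {
 *		.cpu_has_kvm_support	= cpu_has_kvm_support,
 *		.hardware_enable	= hardware_enable,
 *		.run			= vmx_vcpu_run,
 *		...
 *	};
 */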

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
			       struct kvm_memory_slot *slot);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest TSC rate supported? */
extern bool kvm_has_tsc_control;
/* minimum supported tsc_khz for guests */
extern u32 kvm_min_guest_tsc_khz;
/* maximum supported tsc_khz for guests */
extern u32 kvm_max_guest_tsc_khz;

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with mmio request */
	EMULATE_FAIL,		/* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE	(1 << 0)
#define EMULTYPE_TRAP_UD	(1 << 1)
#define EMULTYPE_SKIP		(1 << 2)
#define EMULTYPE_RETRY		(1 << 3)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
			    int emulation_type, void *insn, int insn_len);

static inline int emulate_instruction(struct kvm_vcpu *vcpu,
			int emulation_type)
{
	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}
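
/*
 * Typical use in a vendor #UD intercept (a sketch modelled on the
 * handlers in vmx.c/svm.c):
 *
 *	er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
 *	if (er != EMULATE_DONE)
 *		kvm_queue_exception(vcpu, UD_VECTOR);
 */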

void kvm_enable_efer_bits(u64);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

struct x86_emulate_ctxt;

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
		    bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t gfn, void *data, int offset, int len,
			    u32 access);
void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);

int kvm_pic_set_irq(void *opaque, int irq, int level);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int fx_init(struct kvm_vcpu *vcpu);

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes);
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

int complete_pio(struct kvm_vcpu *vcpu);
bool kvm_check_iopl(struct kvm_vcpu *vcpu);

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE							\
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
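/*
 * Worked out: 0x68 (104) bytes of base TSS + 32 bytes of interrupt
 * redirection bitmap + 8192 bytes of I/O permission bitmap + 1 trailing
 * byte (presumably the conventional 0xff IOPB terminator) = 8329 bytes
 * for the real-mode TSS.
 */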

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK		(1 << 0)
#define HF_HIF_MASK		(1 << 1)
#define HF_VINTR_MASK		(1 << 2)
#define HF_NMI_MASK		(1 << 3)
#define HF_IRET_MASK		(1 << 4)
#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_spurious_fault(void);
extern bool kvm_rebooting;

#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)	\
	"666: " insn "\n\t" \
	"668: \n\t" \
	".pushsection .fixup, \"ax\" \n" \
	"667: \n\t" \
	cleanup_insn "\n\t" \
	"cmpb $0, kvm_rebooting \n\t" \
	"jne 668b \n\t" \
	__ASM_SIZE(push) " $666b \n\t" \
	"call kvm_spurious_fault \n\t" \
	".popsection \n\t" \
	".pushsection __ex_table, \"a\" \n\t" \
	_ASM_PTR " 666b, 667b \n\t" \
	".popsection"

#define __kvm_handle_fault_on_reboot(insn)		\
	____kvm_handle_fault_on_reboot(insn, "")

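/*
 * A hedged usage sketch: the vendor modules wrap raw virtualization
 * instructions with this macro so that, e.g., a VMXOFF racing with a
 * reboot is swallowed instead of oopsing (cf. the __ex() wrapper in
 * vmx.c):
 *
 *	asm volatile(__kvm_handle_fault_on_reboot("vmxoff") : : : "cc");
 */
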
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);

void kvm_define_shared_msr(unsigned index, u32 msr);
void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);

#endif /* _ASM_X86_KVM_HOST_H */