#ifndef __KVM_H
#define __KVM_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include "vmx.h"
#include <linux/kvm.h>

#define CR0_PE_MASK (1ULL << 0)
#define CR0_TS_MASK (1ULL << 3)
#define CR0_NE_MASK (1ULL << 5)
#define CR0_WP_MASK (1ULL << 16)
#define CR0_NW_MASK (1ULL << 29)
#define CR0_CD_MASK (1ULL << 30)
#define CR0_PG_MASK (1ULL << 31)

#define CR3_WPT_MASK (1ULL << 3)
#define CR3_PCD_MASK (1ULL << 4)

#define CR3_RESEVED_BITS 0x07ULL
#define CR3_L_MODE_RESEVED_BITS (~((1ULL << 40) - 1) | 0x0fe7ULL)
#define CR3_FLAGS_MASK ((1ULL << 5) - 1)

#define CR4_VME_MASK (1ULL << 0)
#define CR4_PSE_MASK (1ULL << 4)
#define CR4_PAE_MASK (1ULL << 5)
#define CR4_PGE_MASK (1ULL << 7)
#define CR4_VMXE_MASK (1ULL << 13)

#define KVM_GUEST_CR0_MASK \
	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \
	 | CR0_NW_MASK | CR0_CD_MASK)
#define KVM_VM_CR0_ALWAYS_ON \
	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK)
#define KVM_GUEST_CR4_MASK \
	(CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK | CR4_VME_MASK)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

#define KVM_MAX_VCPUS 1
#define KVM_MEMORY_SLOTS 4
#define KVM_NUM_MMU_PAGES 256
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25

#define FX_IMAGE_SIZE 512
#define FX_IMAGE_ALIGN 16
#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)
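
/*
 * Editorial note on the sizing above: fx_buf (in struct kvm_vcpu, below)
 * holds two FX_IMAGE_SIZE-byte fxsave images, one for the host and one
 * for the guest, plus FX_IMAGE_ALIGN bytes of slack, since fxsave/fxrstor
 * require 16-byte-aligned buffers; the aligned host_fx_image and
 * guest_fx_image pointers are presumably set up in fx_init().
 */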

#define DE_VECTOR 0
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long gva_t;
typedef u64 gpa_t;
typedef unsigned long gfn_t;

typedef unsigned long hva_t;
typedef u64 hpa_t;
typedef unsigned long hfn_t;

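/*
 * Illustrative helpers (added for this write-up, not part of the
 * original API): a physical address and its frame number convert via
 * the usual page arithmetic, using PAGE_SHIFT from <linux/mm.h>.
 */
static inline gfn_t example_gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline gpa_t example_gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}
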
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels : 4;
		unsigned level : 4;
		unsigned quadrant : 2;
		unsigned pad_for_nice_hex_output : 6;
		unsigned metaphysical : 1;
	};
};

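/*
 * Worked example (illustrative only, not used by KVM itself): a level-1
 * shadow page for a 4-level guest packs as glevels = 4 (bits 0:3) and
 * level = 1 (bits 4:7), i.e. role.word == 0x14.
 */
static inline union kvm_mmu_page_role example_make_role(unsigned glevels,
							unsigned level,
							unsigned quadrant)
{
	union kvm_mmu_page_role role = { .word = 0 };

	role.glevels = glevels;
	role.level = level;
	role.quadrant = quadrant;
	return role;
}
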
struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	hpa_t page_hpa;
	unsigned long slot_bitmap; /* One bit set per slot which has memory
				    * in this shadow page.
				    */
	int global;      /* Set if all ptes in this page are global */
	int multimapped; /* More than one parent_pte? */
	int root_count;  /* Currently serving as active root */
	union {
		u64 *parent_pte;               /* !multimapped */
		struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
	};
};

struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};

#define vmx_msr_entry kvm_msr_entry

struct kvm_vcpu;

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;

	u64 *pae_root;
};

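/*
 * Usage sketch (comment only, since struct kvm_vcpu is defined further
 * down): callers translate addresses through the hooks of whatever mode
 * is currently loaded, e.g.
 *
 *	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
 *
 * so the rest of KVM never branches on the guest's paging mode.
 */
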
#define KVM_NR_MEM_OBJS 20

struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
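
/*
 * Sketch of how such a cache is consumed (an assumption for
 * illustration; the real helpers live in the mmu implementation): the
 * cache is topped up before the fault is handled, so "allocation" on
 * the fault path reduces to popping a preallocated object.
 */
static inline void *example_mmu_cache_pop(struct kvm_mmu_memory_cache *mc)
{
	return mc->nobjs ? mc->objects[--mc->nobjs] : NULL;
}
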
struct kvm_guest_debug {
	int enabled;
	unsigned long bp[4];
	int singlestep;
};

enum {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	NR_VCPU_REGS
};

enum {
	VCPU_SREG_CS,
	VCPU_SREG_DS,
	VCPU_SREG_ES,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_SS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

struct kvm_vcpu {
	struct kvm *kvm;
	union {
		struct vmcs *vmcs;
		struct vcpu_svm *svm;
	};
	struct mutex mutex;
	int cpu;
	int launched;
	int interrupt_window_open;
	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
	unsigned long irq_pending[NR_IRQ_WORDS];
	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
	unsigned long rip;                /* needs vcpu_load_rsp_rip() */

	unsigned long cr0;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr8;
	u64 pdptrs[4]; /* pae */
	u64 shadow_efer;
	u64 apic_base;
	u64 ia32_misc_enable_msr;
	int nmsrs;
	struct vmx_msr_entry *guest_msrs;
	struct vmx_msr_entry *host_msrs;

	struct list_head free_pages;
	struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
	struct kvm_mmu mmu;

	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;

	gfn_t last_pt_write_gfn;
	int last_pt_write_count;

	struct kvm_guest_debug guest_debug;

	char fx_buf[FX_BUF_SIZE];
	char *host_fx_image;
	char *guest_fx_image;

	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_size;
	unsigned char mmio_data[8];
	gpa_t mmio_phys_addr;

	struct {
		int active;
		u8 save_iopl;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
	} rmode;
};

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	struct page **phys_mem;
	unsigned long *dirty_bitmap;
};

struct kvm {
	spinlock_t lock; /* protects everything except vcpus */
	int nmemslots;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct list_head active_mmu_pages;
	int n_free_mmu_pages;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
	int memory_config_version;
	int busy;
	unsigned long rmap_overflow;
	struct list_head vm_list;
};

struct kvm_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 halt_exits;
	u32 request_irq_exits;
	u32 irq_exits;
};

struct descriptor_table {
	u16 limit;
	unsigned long base;
} __attribute__((packed));

struct kvm_arch_ops {
	int (*cpu_has_kvm_support)(void);     /* __init */
	int (*disabled_by_bios)(void);        /* __init */
	void (*hardware_enable)(void *dummy); /* __init */
	void (*hardware_disable)(void *dummy);
	int (*hardware_setup)(void);          /* __init */
	void (*hardware_unsetup)(void);       /* __exit */

	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);

	struct kvm_vcpu *(*vcpu_load)(struct kvm_vcpu *vcpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
			       struct kvm_debug_guest *dbg);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr0_no_modeswitch)(struct kvm_vcpu *vcpu,
				      unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception);
	void (*cache_regs)(struct kvm_vcpu *vcpu);
	void (*decache_regs)(struct kvm_vcpu *vcpu);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t addr);
	void (*tlb_flush)(struct kvm_vcpu *vcpu);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  unsigned long addr, u32 err_code);

	void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

	int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
};

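/*
 * Registration sketch (hypothetical backend names, for illustration):
 * an arch module such as vmx or svm fills in a kvm_arch_ops table and
 * hands it to kvm_init_arch(), declared below:
 *
 *	static struct kvm_arch_ops my_arch_ops = {
 *		.cpu_has_kvm_support = my_cpu_has_kvm_support,
 *		.hardware_enable = my_hardware_enable,
 *		.run = my_vcpu_run,
 *		...
 *	};
 *
 *	kvm_init_arch(&my_arch_ops, THIS_MODULE);
 */
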
extern struct kvm_stat kvm_stat;
extern struct kvm_arch_ops *kvm_arch_ops;

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt ...) kvm_printf(vcpu->kvm, fmt)

int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
void kvm_exit_arch(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot);

hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
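
/*
 * Caller pattern (illustrative): errors are signalled in the high bit
 * of the returned hpa_t rather than by a separate return code, so a
 * translation is checked before use:
 *
 *	hpa_t hpa = gva_to_hpa(vcpu, gva);
 *	if (is_error_hpa(hpa))
 *		return;
 */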

void kvm_emulator_want_group7_invlpg(void);

extern hpa_t bad_page_address;

static inline struct page *gfn_to_page(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->phys_mem[gfn - slot->base_gfn];
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

enum emulation_result {
	EMULATE_DONE,    /* no further processing */
	EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
	EMULATE_FAIL,    /* can't emulate this instruction */
};
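
/*
 * Illustrative use (an assumption about callers outside this header):
 * an exit handler dispatches on the result, e.g. treating
 * EMULATE_DO_MMIO as "kvm_run now carries an mmio request userspace
 * must complete before the vcpu can make progress".
 */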

int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long cr2, u16 error_code);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);

struct x86_emulate_ctxt;

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void fx_init(struct kvm_vcpu *vcpu);

void load_msrs(struct vmx_msr_entry *e, int n);
void save_msrs(struct vmx_msr_entry *e, int n);
void kvm_resched(struct kvm_vcpu *vcpu);

int kvm_read_guest(struct kvm_vcpu *vcpu,
		   gva_t addr,
		   unsigned long size,
		   void *dest);

int kvm_write_guest(struct kvm_vcpu *vcpu,
		    gva_t addr,
		    unsigned long size,
		    void *data);

unsigned long segment_base(u16 selector);

void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);

/*
 * Recycle shadow pages before handling the fault if the free pool has
 * dropped below the low-water mark, so the fault path itself never
 * runs out of pages.
 */
static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				     u32 error_code)
{
	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
		kvm_mmu_free_some_pages(vcpu);
	return vcpu->mmu.page_fault(vcpu, gva, error_code);
}

static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return (slot) ? slot->phys_mem[gfn - slot->base_gfn] : NULL;
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->shadow_efer & EFER_LME;
#else
	return 0;
#endif
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & CR4_PAE_MASK;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & CR4_PSE_MASK;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & CR0_PG_MASK;
}

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page->private;
}

static inline u16 read_fs(void)
{
	u16 seg;

	asm ("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_gs(void)
{
	u16 seg;

	asm ("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_ldt(void)
{
	u16 ldt;

	asm ("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void load_fs(u16 sel)
{
	asm ("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
	asm ("mov %0, %%gs" : : "rm"(sel));
}

#ifndef load_ldt
static inline void load_ldt(u16 sel)
{
	asm ("lldt %0" : : "rm"(sel));
}
#endif

static inline void get_idt(struct descriptor_table *table)
{
	asm ("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
	asm ("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
	u16 tr;

	asm ("str %0" : "=g"(tr));
	return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline void fx_save(void *image)
{
	asm ("fxsave (%0)" : : "r"(image));
}

static inline void fx_restore(void *image)
{
	asm ("fxrstor (%0)" : : "r"(image));
}

static inline void fpu_init(void)
{
	asm ("finit");
}

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

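/*
 * VMX instruction encodings as raw bytes, presumably so this file
 * builds with assemblers that do not yet know the VMX mnemonics.
 */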
#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"

#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
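
/*
 * Editorial note: RMODE_TSS_SIZE adds up a full 32-bit TSS (0x68 bytes),
 * the interrupt redirection bitmap (256 bits) and the I/O permission
 * bitmap (one bit per port, 64K ports), plus the extra terminating byte
 * the IOPB must end with.
 */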

#endif