#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

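/*
 * CR0/CR4 bits that the hypervisor may let the guest own: the guest can
 * change a guest-owned bit without a VM exit, so the software copies of
 * CR0/CR4 must be refreshed from hardware before those bits are read.
 */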
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE)

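/*
 * General-purpose registers are cached lazily: regs_avail marks registers
 * whose cached value in vcpu->arch.regs is current, and regs_dirty marks
 * registers that must be written back to hardware (e.g. the VMCS) before
 * the next guest entry.
 */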
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg,
				      unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

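/*
 * The PDPTRs are only meaningful with PAE paging; filling the cache reads
 * them from the guest page tables, which is why this may sleep on SVM.
 */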
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

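/*
 * Control-register reads follow the same pattern: if any requested bit may
 * currently be owned by the guest, decache the live value from hardware
 * before consulting the software copy.
 */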
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if (tmask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if (tmask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
		kvm_x86_ops->decache_cr3(vcpu);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

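/*
 * Assemble the 64-bit EDX:EAX pair produced by instructions such as
 * RDMSR and RDTSC; "& -1u" truncates each register to its low 32 bits.
 */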
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}

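/*
 * HF_GUEST_MASK tracks nested virtualization: it is set while the vcpu is
 * running a nested (L2) guest rather than the L1 hypervisor itself.
 */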
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

#endif /* ASM_KVM_CACHE_REGS_H */