/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *      Gleb Natapov <[email protected]>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>

#include "async_pf.h"
#include <trace/events/kvm.h>

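/*
 * With CONFIG_KVM_ASYNC_PF_SYNC the architecture wants the "page present"
 * notification synchronously from the workqueue, right after the page has
 * been faulted in; without it the notification is deferred and delivered
 * from vcpu context by kvm_check_async_pf_completion().
 */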
static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
					       struct kvm_async_pf *work)
{
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
	kvm_arch_async_page_present(vcpu, work);
#endif
}

static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
						struct kvm_async_pf *work)
{
#ifndef CONFIG_KVM_ASYNC_PF_SYNC
	kvm_arch_async_page_present(vcpu, work);
#endif
}

static struct kmem_cache *async_pf_cache;

int kvm_async_pf_init(void)
{
	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

	if (!async_pf_cache)
		return -ENOMEM;

	return 0;
}

void kvm_async_pf_deinit(void)
{
	kmem_cache_destroy(async_pf_cache);
	async_pf_cache = NULL;
}

void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->async_pf.done);
	INIT_LIST_HEAD(&vcpu->async_pf.queue);
	spin_lock_init(&vcpu->async_pf.lock);
}

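/*
 * Runs in workqueue context: fault the page in on behalf of the guest,
 * move the work item to the vcpu's "done" list, and wake the vcpu in
 * case it went to sleep waiting for the page.
 */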
static void async_pf_execute(struct work_struct *work)
{
	struct kvm_async_pf *apf =
		container_of(work, struct kvm_async_pf, work);
	struct mm_struct *mm = apf->mm;
	struct kvm_vcpu *vcpu = apf->vcpu;
	unsigned long addr = apf->addr;
	gva_t gva = apf->gva;

	might_sleep();

	/*
	 * This work is run asynchronously to the task which owns
	 * mm and might be done in another context, so we must
	 * use FOLL_REMOTE.
	 */
	__get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL, FOLL_REMOTE);

	kvm_async_page_present_sync(vcpu, apf);

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&apf->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	/*
	 * apf may be freed by kvm_check_async_pf_completion() after
	 * this point
	 */

	trace_kvm_async_pf_completed(addr, gva);

	/*
	 * This memory barrier pairs with prepare_to_wait's set_current_state()
	 */
	smp_mb();
	if (swait_active(&vcpu->wq))
		swake_up(&vcpu->wq);

	mmput(mm);
	kvm_put_kvm(vcpu->kvm);
}

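/*
 * Cancel or flush every outstanding work item and free all completed
 * ones, e.g. when the vcpu is being destroyed or reset.
 */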
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
	/* cancel outstanding work queue item */
	while (!list_empty(&vcpu->async_pf.queue)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.queue,
					 typeof(*work), queue);
		list_del(&work->queue);

#ifdef CONFIG_KVM_ASYNC_PF_SYNC
		flush_work(&work->work);
#else
		if (cancel_work_sync(&work->work)) {
			mmput(work->mm);
			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
			kmem_cache_free(async_pf_cache, work);
		}
#endif
	}

	spin_lock(&vcpu->async_pf.lock);
	while (!list_empty(&vcpu->async_pf.done)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.done,
					 typeof(*work), link);
		list_del(&work->link);
		kmem_cache_free(async_pf_cache, work);
	}
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued = 0;
}

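/*
 * Called from vcpu context: hand each completed fault back to the
 * architecture code and, unless CONFIG_KVM_ASYNC_PF_SYNC already did so
 * from the workqueue, tell the guest the page is now present.
 */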
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	while (!list_empty_careful(&vcpu->async_pf.done) &&
	       kvm_arch_can_inject_async_page_present(vcpu)) {
		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
					link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		kvm_arch_async_page_ready(vcpu, work);
		kvm_async_page_present_async(vcpu, work);

		list_del(&work->queue);
		vcpu->async_pf.queued--;
		kmem_cache_free(async_pf_cache, work);
	}
}

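/*
 * Queue an asynchronous fault for @gva/@hva: pin the mm and the VM so
 * they outlive the work item, schedule async_pf_execute() and tell the
 * guest the page is not present yet.  Returns 1 if the fault was queued,
 * 0 otherwise.
 */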
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch)
{
	struct kvm_async_pf *work;

	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
		return 0;

	/* setup delayed work */

	/*
	 * do alloc nowait, because if we are going to sleep anyway we
	 * may as well sleep faulting in page
	 */
	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
	if (!work)
		return 0;

	work->wakeup_all = false;
	work->vcpu = vcpu;
	work->gva = gva;
	work->addr = hva;
	work->arch = *arch;
	work->mm = current->mm;
	atomic_inc(&work->mm->mm_users);
	kvm_get_kvm(work->vcpu->kvm);

	/*
	 * This can't really happen, otherwise gfn_to_pfn_async
	 * would succeed.
	 */
	if (unlikely(kvm_is_error_hva(work->addr)))
		goto retry_sync;

	INIT_WORK(&work->work, async_pf_execute);
	if (!schedule_work(&work->work))
		goto retry_sync;

	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
	kvm_arch_async_page_not_present(vcpu, work);
	return 1;
retry_sync:
	kvm_put_kvm(work->vcpu->kvm);
	mmput(work->mm);
	kmem_cache_free(async_pf_cache, work);
	return 0;
}

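/*
 * Inject a "wakeup all" completion, so a guest waiting on any async
 * fault token can resume every task it put to sleep.
 */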
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	if (!list_empty_careful(&vcpu->async_pf.done))
		return 0;

	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	work->wakeup_all = true;
	INIT_LIST_HEAD(&work->queue); /* for list_del to work */

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&work->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued++;
	return 0;
}