// SPDX-License-Identifier: GPL-2.0-only
/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *      Gleb Natapov <[email protected]>
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include "async_pf.h"
#include <trace/events/kvm.h>
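
/*
 * With CONFIG_KVM_ASYNC_PF_SYNC the "page present" notification is delivered
 * synchronously from the worker; otherwise it is delivered later from
 * kvm_check_async_pf_completion().  Exactly one of these two helpers compiles
 * to a call to kvm_arch_async_page_present().
 */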
static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
                                               struct kvm_async_pf *work)
{
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
        kvm_arch_async_page_present(vcpu, work);
#endif
}
static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
                                                struct kvm_async_pf *work)
{
#ifndef CONFIG_KVM_ASYNC_PF_SYNC
        kvm_arch_async_page_present(vcpu, work);
#endif
}
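
/*
 * All async page fault requests are allocated from this slab cache, set up
 * by kvm_async_pf_init() and torn down by kvm_async_pf_deinit().
 */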
static struct kmem_cache *async_pf_cache;

int kvm_async_pf_init(void)
{
        async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

        if (!async_pf_cache)
                return -ENOMEM;

        return 0;
}

void kvm_async_pf_deinit(void)
{
        kmem_cache_destroy(async_pf_cache);
        async_pf_cache = NULL;
}

void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
        INIT_LIST_HEAD(&vcpu->async_pf.done);
        INIT_LIST_HEAD(&vcpu->async_pf.queue);
        spin_lock_init(&vcpu->async_pf.lock);
}
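
/*
 * Workqueue callback for one asynchronous page fault: fault the page in
 * through the guest task's mm, move the request to the vcpu's "done" list
 * and wake the vcpu if it is waiting on its wait queue.
 */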
static void async_pf_execute(struct work_struct *work)
{
        struct kvm_async_pf *apf =
                container_of(work, struct kvm_async_pf, work);
        struct mm_struct *mm = apf->mm;
        struct kvm_vcpu *vcpu = apf->vcpu;
        unsigned long addr = apf->addr;
        gpa_t cr2_or_gpa = apf->cr2_or_gpa;
        int locked = 1;

        might_sleep();

        /*
         * This work is run asynchronously to the task which owns
         * mm and might be done in another context, so we must
         * access remotely.
         */
        down_read(&mm->mmap_sem);
        get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
                              &locked);
        if (locked)
                up_read(&mm->mmap_sem);

        kvm_async_page_present_sync(vcpu, apf);

        spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&apf->link, &vcpu->async_pf.done);
        apf->vcpu = NULL;
        spin_unlock(&vcpu->async_pf.lock);

        /*
         * apf may be freed by kvm_check_async_pf_completion() after
         * this point
         */

        trace_kvm_async_pf_completed(addr, cr2_or_gpa);

        if (swq_has_sleeper(&vcpu->wq))
                swake_up_one(&vcpu->wq);

        mmput(mm);
        kvm_put_kvm(vcpu->kvm);
}
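
/*
 * Cancel or flush every request still on the vcpu's queue and free all
 * completed requests, dropping the mm and kvm references taken in
 * kvm_setup_async_pf() for work that never ran.
 */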
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
        spin_lock(&vcpu->async_pf.lock);

        /* cancel outstanding work queue item */
        while (!list_empty(&vcpu->async_pf.queue)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.queue,
                                         typeof(*work), queue);
                list_del(&work->queue);

                /*
                 * We know it's present in vcpu->async_pf.done, do
                 * nothing here.
                 */
                if (!work->vcpu)
                        continue;

                spin_unlock(&vcpu->async_pf.lock);
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
                flush_work(&work->work);
#else
                if (cancel_work_sync(&work->work)) {
                        mmput(work->mm);
                        kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
                        kmem_cache_free(async_pf_cache, work);
                }
#endif
                spin_lock(&vcpu->async_pf.lock);
        }

        while (!list_empty(&vcpu->async_pf.done)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.done,
                                         typeof(*work), link);
                list_del(&work->link);
                kmem_cache_free(async_pf_cache, work);
        }
        spin_unlock(&vcpu->async_pf.lock);

        vcpu->async_pf.queued = 0;
}
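
/*
 * For each completed request, let the architecture code know the page is
 * ready and, unless the notification was already delivered synchronously
 * by the worker, inject the "page ready" event into the guest now.
 */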
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        while (!list_empty_careful(&vcpu->async_pf.done) &&
               kvm_arch_can_inject_async_page_present(vcpu)) {
                spin_lock(&vcpu->async_pf.lock);
                work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
                                        link);
                list_del(&work->link);
                spin_unlock(&vcpu->async_pf.lock);

                kvm_arch_async_page_ready(vcpu, work);
                kvm_async_page_present_async(vcpu, work);

                list_del(&work->queue);
                vcpu->async_pf.queued--;
                kmem_cache_free(async_pf_cache, work);
        }
}
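
/*
 * Called when a guest access faults on a page that is not resident: take
 * references on the mm and the VM, schedule async_pf_execute() to fault the
 * page in, and tell the architecture code to inject a "page not present"
 * event so the guest can run another task meanwhile.
 * Returns 1 if the fault will be handled asynchronously, 0 otherwise.
 */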
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                       unsigned long hva, struct kvm_arch_async_pf *arch)
{
        struct kvm_async_pf *work;

        if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
                return 0;

        /* setup delayed work */

        /*
         * do alloc nowait since if we are going to sleep anyway we
         * may as well sleep faulting in page
         */
        work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
        if (!work)
                return 0;

        work->wakeup_all = false;
        work->vcpu = vcpu;
        work->cr2_or_gpa = cr2_or_gpa;
        work->addr = hva;
        work->arch = *arch;
        work->mm = current->mm;
        mmget(work->mm);
        kvm_get_kvm(work->vcpu->kvm);

        /* this can't really happen otherwise gfn_to_pfn_async
           would succeed */
        if (unlikely(kvm_is_error_hva(work->addr)))
                goto retry_sync;

        INIT_WORK(&work->work, async_pf_execute);
        if (!schedule_work(&work->work))
                goto retry_sync;

        list_add_tail(&work->queue, &vcpu->async_pf.queue);
        vcpu->async_pf.queued++;
        kvm_arch_async_page_not_present(vcpu, work);
        return 1;
retry_sync:
        kvm_put_kvm(work->vcpu->kvm);
        mmput(work->mm);
        kmem_cache_free(async_pf_cache, work);
        return 0;
}
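
/*
 * Queue a "wakeup all" pseudo completion: a dummy entry on the "done" list
 * whose delivery tells the guest to wake every task that is still waiting
 * on an asynchronous page fault, even though no real page became ready.
 */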
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        if (!list_empty_careful(&vcpu->async_pf.done))
                return 0;

        work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
        if (!work)
                return -ENOMEM;

        work->wakeup_all = true;
        INIT_LIST_HEAD(&work->queue); /* for list_del to work */

        spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&work->link, &vcpu->async_pf.done);
        spin_unlock(&vcpu->async_pf.lock);

        vcpu->async_pf.queued++;
        return 0;
}