// SPDX-License-Identifier: GPL-2.0-only
/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *      Gleb Natapov <[email protected]>
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include "async_pf.h"
#include <trace/events/kvm.h>

static struct kmem_cache *async_pf_cache;

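/* Create the slab cache from which async #PF work items are allocated. */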
int kvm_async_pf_init(void)
{
        async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

        if (!async_pf_cache)
                return -ENOMEM;

        return 0;
}

void kvm_async_pf_deinit(void)
{
        kmem_cache_destroy(async_pf_cache);
        async_pf_cache = NULL;
}

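/*
 * Initialize the per-vCPU async #PF state: the queue of in-flight faults,
 * the list of completed ("done") faults, and the lock protecting both.
 */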
void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
        INIT_LIST_HEAD(&vcpu->async_pf.done);
        INIT_LIST_HEAD(&vcpu->async_pf.queue);
        spin_lock_init(&vcpu->async_pf.lock);
}

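/*
 * Work item callback: fault the host page in on the vCPU's behalf, move the
 * item to the vCPU's "done" list, and wake the vCPU so the completion can be
 * delivered to the guest.
 */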
static void async_pf_execute(struct work_struct *work)
{
        struct kvm_async_pf *apf =
                container_of(work, struct kvm_async_pf, work);
        struct kvm_vcpu *vcpu = apf->vcpu;
        struct mm_struct *mm = vcpu->kvm->mm;
        unsigned long addr = apf->addr;
        gpa_t cr2_or_gpa = apf->cr2_or_gpa;
        int locked = 1;
        bool first;

        might_sleep();

        /*
         * Attempt to pin the VM's host address space, and simply skip gup() if
         * acquiring a pin fails, i.e. if the process is exiting.  Note, KVM
         * holds a reference to its associated mm_struct until the very end of
         * kvm_destroy_vm(), i.e. the struct itself won't be freed before this
         * work item is fully processed.
         */
        if (mmget_not_zero(mm)) {
                mmap_read_lock(mm);
                get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, &locked);
                if (locked)
                        mmap_read_unlock(mm);
                mmput(mm);
        }

        /*
         * Notify and kick the vCPU even if faulting in the page failed, e.g.
         * so that the vCPU can retry the fault synchronously.
         */
        if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
                kvm_arch_async_page_present(vcpu, apf);

        spin_lock(&vcpu->async_pf.lock);
        first = list_empty(&vcpu->async_pf.done);
        list_add_tail(&apf->link, &vcpu->async_pf.done);
        apf->vcpu = NULL;
        spin_unlock(&vcpu->async_pf.lock);

        if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
                kvm_arch_async_page_present_queued(vcpu);

        /*
         * apf may be freed by kvm_check_async_pf_completion() after
         * this point
         */

        trace_kvm_async_pf_completed(addr, cr2_or_gpa);

        __kvm_vcpu_wake_up(vcpu);
}

static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
{
        /*
         * The async #PF is "done", but KVM must wait for the work item itself,
         * i.e. async_pf_execute(), to run to completion.  If KVM is a module,
         * KVM must ensure *no* code owned by KVM (the module) can be run after
         * the last call to module_put().  Note, flushing the work item is
         * always required when the item is taken off the completion queue.
         * E.g. even if the vCPU handles the item in the "normal" path, the VM
         * could be terminated before async_pf_execute() completes.
         *
         * Wake-all events skip the queue and go straight to the done list,
         * i.e. they don't need to be flushed (but sanity check that the work
         * was never queued).
         */
        if (work->wakeup_all)
                WARN_ON_ONCE(work->work.func);
        else
                flush_work(&work->work);
        kmem_cache_free(async_pf_cache, work);
}

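/*
 * Drop every async #PF work item still associated with the vCPU, whether it
 * is still pending on the queue or already sitting on the done list.  No work
 * item may be running, or able to run, once this returns.
 */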
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
        spin_lock(&vcpu->async_pf.lock);

        /* cancel outstanding work queue items */
        while (!list_empty(&vcpu->async_pf.queue)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.queue,
                                         typeof(*work), queue);
                list_del(&work->queue);

                /*
                 * The work item already completed (async_pf_execute() cleared
                 * work->vcpu) and is on vcpu->async_pf.done; it is freed by
                 * the loop below, so do nothing here.
                 */
                if (!work->vcpu)
                        continue;

                spin_unlock(&vcpu->async_pf.lock);
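                /*
                 * With CONFIG_KVM_ASYNC_PF_SYNC the work item delivers the
                 * "page present" notification itself, so let it run to
                 * completion; it then lands on the done list and is freed
                 * below.  Otherwise cancel it, and free it here only if the
                 * cancellation kept it from ever running (i.e. it never made
                 * it onto the done list).
                 */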
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
                flush_work(&work->work);
#else
                if (cancel_work_sync(&work->work))
                        kmem_cache_free(async_pf_cache, work);
#endif
                spin_lock(&vcpu->async_pf.lock);
        }

        while (!list_empty(&vcpu->async_pf.done)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.done,
                                         typeof(*work), link);
                list_del(&work->link);

                spin_unlock(&vcpu->async_pf.lock);
                kvm_flush_and_free_async_pf_work(work);
                spin_lock(&vcpu->async_pf.lock);
        }
        spin_unlock(&vcpu->async_pf.lock);

        vcpu->async_pf.queued = 0;
}

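/*
 * Dequeue completed async #PFs and hand them to the arch code: mark each page
 * as ready and, unless the synchronous mechanism already notified the guest
 * from the work item, inject the "page ready" event now.
 */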
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        while (!list_empty_careful(&vcpu->async_pf.done) &&
               kvm_arch_can_dequeue_async_page_present(vcpu)) {
                spin_lock(&vcpu->async_pf.lock);
                work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
                                        link);
                list_del(&work->link);
                spin_unlock(&vcpu->async_pf.lock);

                kvm_arch_async_page_ready(vcpu, work);
                if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
                        kvm_arch_async_page_present(vcpu, work);

                list_del(&work->queue);
                vcpu->async_pf.queued--;
                kvm_flush_and_free_async_pf_work(work);
        }
}

/*
 * Try to schedule a job to handle the page fault asynchronously. Returns
 * 'true' on success, 'false' on failure (the page fault has to be handled
 * synchronously).
 */
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                        unsigned long hva, struct kvm_arch_async_pf *arch)
{
        struct kvm_async_pf *work;

        if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
                return false;

        /* Arch-specific code should not do async PF in this case */
        if (unlikely(kvm_is_error_hva(hva)))
                return false;

        /*
         * Allocate without sleeping (GFP_NOWAIT): if we would have to sleep
         * for the allocation, we may as well handle the fault synchronously
         * and sleep faulting the page in instead.
         */
        work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
        if (!work)
                return false;

        work->wakeup_all = false;
        work->vcpu = vcpu;
        work->cr2_or_gpa = cr2_or_gpa;
        work->addr = hva;
        work->arch = *arch;

        INIT_WORK(&work->work, async_pf_execute);

        list_add_tail(&work->queue, &vcpu->async_pf.queue);
        vcpu->async_pf.queued++;
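        /*
         * Record whether a "page not present" event was actually injected;
         * arch code consults this when deciding how to report the completion.
         */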
        work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);

        schedule_work(&work->work);

        return true;
}

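/*
 * Queue a special "wakeup all" completion.  It bypasses the normal work queue
 * and goes straight to the done list; arch code uses it to wake every guest
 * task that is still waiting on an async #PF.
 */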
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;
        bool first;

        if (!list_empty_careful(&vcpu->async_pf.done))
                return 0;

        work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
        if (!work)
                return -ENOMEM;

        work->wakeup_all = true;
        INIT_LIST_HEAD(&work->queue); /* for list_del to work */

        spin_lock(&vcpu->async_pf.lock);
        first = list_empty(&vcpu->async_pf.done);
        list_add_tail(&work->link, &vcpu->async_pf.done);
        spin_unlock(&vcpu->async_pf.lock);

        if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
                kvm_arch_async_page_present_queued(vcpu);

        vcpu->async_pf.queued++;
        return 0;
}