// SPDX-License-Identifier: GPL-2.0-only
/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *      Gleb Natapov <[email protected]>
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include "async_pf.h"
#include <trace/events/kvm.h>

static struct kmem_cache *async_pf_cache;

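/*
 * Create the slab cache used to allocate kvm_async_pf work items; called
 * once during KVM initialization.
 */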
int kvm_async_pf_init(void)
{
        async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

        if (!async_pf_cache)
                return -ENOMEM;

        return 0;
}

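/*
 * Destroy the work item cache on KVM teardown; the cache pointer is NULLed
 * to guard against use after destruction.
 */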
void kvm_async_pf_deinit(void)
{
        kmem_cache_destroy(async_pf_cache);
        async_pf_cache = NULL;
}

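/*
 * Initialize the per-vCPU async #PF state: the "queue" of in-flight faults,
 * the "done" list of completed faults, and the lock that serializes access
 * to the completion list.
 */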
void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
        INIT_LIST_HEAD(&vcpu->async_pf.done);
        INIT_LIST_HEAD(&vcpu->async_pf.queue);
        spin_lock_init(&vcpu->async_pf.lock);
}

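/*
 * Workqueue callback: fault the page in on behalf of the guest, then move the
 * work item to the vCPU's "done" list and wake the vCPU so it can notify the
 * guest that the page is ready.
 */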
static void async_pf_execute(struct work_struct *work)
{
        struct kvm_async_pf *apf =
                container_of(work, struct kvm_async_pf, work);
        struct kvm_vcpu *vcpu = apf->vcpu;
        struct mm_struct *mm = vcpu->kvm->mm;
        unsigned long addr = apf->addr;
        gpa_t cr2_or_gpa = apf->cr2_or_gpa;
        int locked = 1;
        bool first;

        might_sleep();

        /*
         * Attempt to pin the VM's host address space, and simply skip gup() if
         * acquiring a pin fails, i.e. if the process is exiting.  Note, KVM
         * holds a reference to its associated mm_struct until the very end of
         * kvm_destroy_vm(), i.e. the struct itself won't be freed before this
         * work item is fully processed.
         */
        if (mmget_not_zero(mm)) {
                mmap_read_lock(mm);
                get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, &locked);
                if (locked)
                        mmap_read_unlock(mm);
                mmput(mm);
        }

        /*
         * Notify and kick the vCPU even if faulting in the page failed, e.g.
         * so that the vCPU can retry the fault synchronously.
         */
        if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
                kvm_arch_async_page_present(vcpu, apf);

        spin_lock(&vcpu->async_pf.lock);
        first = list_empty(&vcpu->async_pf.done);
        list_add_tail(&apf->link, &vcpu->async_pf.done);
        spin_unlock(&vcpu->async_pf.lock);

        /*
         * The apf struct may be freed by kvm_check_async_pf_completion() as
         * soon as the lock is dropped.  Nullify it to prevent improper usage.
         */
        apf = NULL;

        if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
                kvm_arch_async_page_present_queued(vcpu);

        trace_kvm_async_pf_completed(addr, cr2_or_gpa);

        __kvm_vcpu_wake_up(vcpu);
}

static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
{
        /*
         * The async #PF is "done", but KVM must wait for the work item itself,
         * i.e. async_pf_execute(), to run to completion.  If KVM is a module,
         * KVM must ensure *no* code owned by the module can be run after the
         * last call to module_put().  Note, flushing the work item is always
         * required when the item is taken off the completion queue.  E.g. even
         * if the vCPU handles the item in the "normal" path, the VM could be
         * terminated before async_pf_execute() completes.
         *
         * Wake-all events skip the queue and go straight to the done list,
         * i.e. they don't need to be flushed (but sanity check that the work
         * wasn't queued).
         */
        if (work->wakeup_all)
                WARN_ON_ONCE(work->work.func);
        else
                flush_work(&work->work);
        kmem_cache_free(async_pf_cache, work);
}

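/*
 * Drop all async #PF work for a vCPU: cancel or flush everything still on the
 * fault queue, then flush and free everything on the completion list, e.g.
 * when the vCPU is being destroyed or its async #PF state is reset.
 */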
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
        /* cancel outstanding work queue item */
        while (!list_empty(&vcpu->async_pf.queue)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.queue,
                                         typeof(*work), queue);
                list_del(&work->queue);

#ifdef CONFIG_KVM_ASYNC_PF_SYNC
                flush_work(&work->work);
#else
                if (cancel_work_sync(&work->work))
                        kmem_cache_free(async_pf_cache, work);
#endif
        }

        spin_lock(&vcpu->async_pf.lock);
        while (!list_empty(&vcpu->async_pf.done)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.done,
                                         typeof(*work), link);
                list_del(&work->link);

                spin_unlock(&vcpu->async_pf.lock);
                kvm_flush_and_free_async_pf_work(work);
                spin_lock(&vcpu->async_pf.lock);
        }
        spin_unlock(&vcpu->async_pf.lock);

        vcpu->async_pf.queued = 0;
}

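/*
 * Deliver completed async #PFs to the guest: for each entry on the "done"
 * list (as long as the arch code allows dequeuing), run the arch "page ready"
 * hooks, drop the item from the fault queue, and flush/free the work item.
 * Typically invoked from the vCPU run loop.
 */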
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        while (!list_empty_careful(&vcpu->async_pf.done) &&
               kvm_arch_can_dequeue_async_page_present(vcpu)) {
                spin_lock(&vcpu->async_pf.lock);
                work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
                                        link);
                list_del(&work->link);
                spin_unlock(&vcpu->async_pf.lock);

                kvm_arch_async_page_ready(vcpu, work);
                if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
                        kvm_arch_async_page_present(vcpu, work);

                list_del(&work->queue);
                vcpu->async_pf.queued--;
                kvm_flush_and_free_async_pf_work(work);
        }
}

/*
 * Try to schedule a job to handle the page fault asynchronously.  Returns
 * 'true' on success, 'false' on failure (the page fault then has to be
 * handled synchronously).
 */
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                        unsigned long hva, struct kvm_arch_async_pf *arch)
{
        struct kvm_async_pf *work;

        if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
                return false;

        /* Arch specific code should not do async PF in this case */
        if (unlikely(kvm_is_error_hva(hva)))
                return false;

        /*
         * Do a nowait allocation: if the vCPU is going to sleep anyway, it
         * may as well sleep faulting the page in synchronously.
         */
        work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
        if (!work)
                return false;

        work->wakeup_all = false;
        work->vcpu = vcpu;
        work->cr2_or_gpa = cr2_or_gpa;
        work->addr = hva;
        work->arch = *arch;

        INIT_WORK(&work->work, async_pf_execute);

        list_add_tail(&work->queue, &vcpu->async_pf.queue);
        vcpu->async_pf.queued++;
        work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);

        schedule_work(&work->work);

        return true;
}

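/*
 * Queue a special "wakeup all" event on the completion list.  It bypasses the
 * fault queue and the workqueue entirely; when it is presented, the arch code
 * notifies the guest so that all tasks still waiting on async #PFs can be
 * woken.
 */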
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;
        bool first;

        if (!list_empty_careful(&vcpu->async_pf.done))
                return 0;

        work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
        if (!work)
                return -ENOMEM;

        work->wakeup_all = true;
        INIT_LIST_HEAD(&work->queue); /* for list_del to work */

        spin_lock(&vcpu->async_pf.lock);
        first = list_empty(&vcpu->async_pf.done);
        list_add_tail(&work->link, &vcpu->async_pf.done);
        spin_unlock(&vcpu->async_pf.lock);

        if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
                kvm_arch_async_page_present_queued(vcpu);

        vcpu->async_pf.queued++;
        return 0;
}