/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <[email protected]>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;

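/*
 * Work item handler: inject the interrupt for an irqfd whose eventfd was
 * signaled but could not be handled from the atomic wakeup callback.  A
 * plain irqfd pulses the GSI (assert then de-assert); a resampler irqfd
 * only asserts and relies on the ack notifier to de-assert it.
 */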
static void
irqfd_inject(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	struct kvm *kvm;
	struct kvm_kernel_irqfd *irqfd;
	int idx;

	resampler = container_of(kian,
			struct kvm_kernel_irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}

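/*
 * Detach an irqfd from its resampler.  The last irqfd leaving a GSI also
 * tears the resampler down: the ack notifier is unregistered and the line
 * is left de-asserted.  Takes kvm->irqfds.resampler_lock internally.
 */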
static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	u64 cnt;

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}


/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
	return list_empty(&irqfd->list) ? false : true;
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

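/*
 * Weak default: report that the interrupt cannot be injected from atomic
 * context, which makes irqfd_wakeup() fall back to the injection work item.
 * Architectures that can inject directly override this.
 */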
int __attribute__((weak)) kvm_arch_set_irq_inatomic(
				struct kvm_kernel_irq_routing_entry *irq,
				struct kvm *kvm, int irq_source_id,
				int level,
				bool line_status)
{
	return -EWOULDBLOCK;
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(wait, struct kvm_kernel_irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & POLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (kvm_arch_set_irq_inatomic(&irq, kvm,
					      KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					      false) == -EWOULDBLOCK)
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}

static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(pt, struct kvm_kernel_irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

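/*
 * Cache the routing entry for irqfd->gsi in irqfd->irq_entry so that
 * irqfd_wakeup() can read it locklessly under the seqcount.  A GSI that
 * maps to zero or multiple entries is recorded with type 0, which forces
 * the atomic injection path to defer to the work item.
 */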
/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	e = entries;
	if (n_entries == 1)
		irqfd->irq_entry = *e;
	else
		irqfd->irq_entry.type = 0;

	write_seqcount_end(&irqfd->irq_entry_sc);
}

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
				struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
				struct irq_bypass_consumer *cons)
{
}

int __attribute__((weak)) kvm_arch_update_irqfd_routing(
				struct kvm *kvm, unsigned int host_irq,
				uint32_t guest_irq, bool set)
{
	return 0;
}
#endif

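/*
 * Set up a new irqfd: take a reference on the eventfd, optionally join or
 * create a resampler for the GSI, hook our wakeup callback into the
 * eventfd's wait queue and cache the current routing entry.
 */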
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_init(&irqfd->irq_entry_sc);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct kvm_kernel_irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);
	srcu_read_unlock(&kvm->irq_srcu, idx);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = f.file->f_op->poll(f.file, &irqfd->pt);

	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the POLLHUP
	 */
	fdput(f);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	if (kvm_arch_has_irq_bypass()) {
		irqfd->consumer.token = (void *)irqfd->eventfd;
		irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
		irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
		irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
		irqfd->consumer.start = kvm_arch_irq_bypass_start;
		ret = irq_bypass_register_consumer(&irqfd->consumer);
		if (ret)
			pr_info("irq bypass consumer (token %p) registration fails: %d\n",
				irqfd->consumer.token, ret);
	}
#endif

	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}

bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

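/*
 * Run the ack notifiers registered for @gsi.  Callers are expected to hold
 * kvm->irq_srcu (see kvm_notify_acked_irq() below), which keeps the RCU
 * list walk safe.
 */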
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
	struct kvm_irq_ack_notifier *kian;

	hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
				 link)
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_vcpu_request_scan_ioapic(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
	kvm_vcpu_request_scan_ioapic(kvm);
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * Shut down any irqfds that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush the workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

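/*
 * Entry point for the KVM_IRQFD ioctl on a VM fd.  A minimal userspace
 * sketch (illustrative only; struct kvm_irqfd is the uapi struct from
 * <linux/kvm.h>):
 *
 *	struct kvm_irqfd data = { .fd = efd, .gsi = gsi };
 *	ioctl(vm_fd, KVM_IRQFD, &data);
 *
 * Passing KVM_IRQFD_FLAG_DEASSIGN in .flags removes an existing binding.
 */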
int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}

/*
 * This function is called as the kvm VM fd is being released.  Shut down
 * all irqfds that still remain open
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
		irqfd_update(kvm, irqfd);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
		if (irqfd->producer) {
			int ret = kvm_arch_update_irqfd_routing(
					irqfd->kvm, irqfd->producer->irq,
					irqfd->gsi, 1);
			WARN_ON(ret);
		}
#endif
	}

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated single-thread
 * queue to prevent deadlock against flushing the normal work-queue.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

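/*
 * A minimal userspace sketch of registering an ioeventfd (illustrative
 * only; struct kvm_ioeventfd is the uapi struct from <linux/kvm.h>):
 *
 *	struct kvm_ioeventfd data = {
 *		.addr = mmio_gpa,
 *		.len  = 4,
 *		.fd   = efd,
 *	};
 *	ioctl(vm_fd, KVM_IOEVENTFD, &data);
 *
 * A guest write of the matching size to that address then signals the
 * eventfd instead of causing an exit to userspace.
 */
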
struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch ? true : false;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

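/*
 * Register one _ioeventfd on the given bus.  Fails with -EEXIST if the
 * addr/len/datamatch combination collides with an existing registration.
 */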
static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
				enum kvm_bus bus_idx,
				struct kvm_ioeventfd *args)
{

	struct eventfd_ctx *eventfd;
	struct _ioeventfd *p;
	int ret;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm->buses[bus_idx]->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_ioeventfd *args)
{
	struct _ioeventfd   *p, *tmp;
	struct eventfd_ctx  *eventfd;
	int                  ret = -ENOENT;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd  ||
		    p->addr != args->addr  ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		kvm->buses[bus_idx]->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

	if (!args->len && bus_idx == KVM_MMIO_BUS)
		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

	return ret;
}

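/*
 * Validate the ioctl arguments and register the ioeventfd on the bus
 * selected by the flags.  A zero-length MMIO ioeventfd is additionally
 * registered on KVM_FAST_MMIO_BUS for cheaper lookup.
 */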
static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
	if (ret)
		goto fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && bus_idx == KVM_MMIO_BUS) {
		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
		if (ret < 0)
			goto fast_fail;
	}

	return 0;

fast_fail:
	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
	return ret;
}

int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}