/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell. All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <[email protected]>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "iodev.h"

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
/*
 * --------------------------------------------------------------------
 * irqfd: Allows an fd to be used to inject an interrupt to the guest
 *
 * Credit goes to Avi Kivity for the original idea.
 * --------------------------------------------------------------------
 */

/*
 * Resampling irqfds are a special variety of irqfds used to emulate
 * level triggered interrupts. The interrupt is asserted on eventfd
 * trigger. On acknowledgement through the irq ack notifier, the
 * interrupt is de-asserted and userspace is notified through the
 * resamplefd. All resamplers on the same gsi are de-asserted
 * together, so we don't need to track the state of each individual
 * user. We can also therefore share the same irq source ID.
 */
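
/*
 * Typical resampling flow, sketched here only to illustrate the description
 * above; this is not an interface definition and the GSI is arbitrary:
 *
 *   1. Userspace signals the irqfd eventfd; KVM asserts the level GSI.
 *   2. The guest services and acknowledges the interrupt.
 *   3. The irq ack notifier fires: KVM de-asserts the GSI and signals the
 *      resamplefd of every resampling irqfd registered on that GSI.
 *   4. Userspace re-evaluates the device; if the level is still high, it
 *      signals the irqfd again to re-assert the interrupt.
 */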
struct _irqfd_resampler {
	struct kvm *kvm;
	/*
	 * List of resampling struct _irqfd objects sharing this gsi.
	 * RCU list modified under kvm->irqfds.resampler_lock
	 */
	struct list_head list;
	struct kvm_irq_ack_notifier notifier;
	/*
	 * Entry in the kvm->irqfds.resampler_list. Used for sharing
	 * resamplers among irqfds on the same gsi.
	 * Accessed and modified under kvm->irqfds.resampler_lock
	 */
	struct list_head link;
};

struct _irqfd {
	/* Used for MSI fast-path */
	struct kvm *kvm;
	wait_queue_t wait;
	/* Update side is protected by irqfds.lock */
	struct kvm_kernel_irq_routing_entry __rcu *irq_entry;
	/* Used for level IRQ fast-path */
	int gsi;
	struct work_struct inject;
	/* The resampler used by this irqfd (resampler-only) */
	struct _irqfd_resampler *resampler;
	/* Eventfd notified on resample (resampler-only) */
	struct eventfd_ctx *resamplefd;
	/* Entry in list of irqfds for a resampler (resampler-only) */
	struct list_head resampler_link;
	/* Used for setup/shutdown */
	struct eventfd_ctx *eventfd;
	struct list_head list;
	poll_table pt;
	struct work_struct shutdown;
};

static struct workqueue_struct *irqfd_cleanup_wq;

static void
irqfd_inject(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI. We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct _irqfd_resampler *resampler;
	struct _irqfd *irqfd;

	resampler = container_of(kian, struct _irqfd_resampler, notifier);

	kvm_set_irq(resampler->kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	rcu_read_lock();

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	rcu_read_unlock();
}

static void
irqfd_resampler_shutdown(struct _irqfd *irqfd)
{
	struct _irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_rcu();

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);
	u64 cnt;

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}


/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct _irqfd *irqfd)
{
	return !list_empty(&irqfd->list);
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct _irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry *irq;
	struct kvm *kvm = irqfd->kvm;

	if (flags & POLLIN) {
		rcu_read_lock();
		irq = rcu_dereference(irqfd->irq_entry);
		/* An event has been signaled, inject an interrupt */
		if (irq)
			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					false);
		else
			schedule_work(&irqfd->inject);
		rcu_read_unlock();
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue. If it is already deactivated, we can
		 * simply return knowing the other side will clean up for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold.
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}

static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
			 struct kvm_irq_routing_table *irq_rt)
{
	struct kvm_kernel_irq_routing_entry *e;

	if (irqfd->gsi >= irq_rt->nr_rt_entries) {
		rcu_assign_pointer(irqfd->irq_entry, NULL);
		return;
	}

	hlist_for_each_entry(e, &irq_rt->map[irqfd->gsi], link) {
		/* Only fast-path MSI. */
		if (e->type == KVM_IRQ_ROUTING_MSI)
			rcu_assign_pointer(irqfd->irq_entry, e);
		else
			rcu_assign_pointer(irqfd->irq_entry, NULL);
	}
}

static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_irq_routing_table *irq_rt;
	struct _irqfd *irqfd, *tmp;
	struct file *file = NULL;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);

	file = eventfd_fget(args->fd);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto fail;
	}

	eventfd = eventfd_ctx_fileget(file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct _irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_rcu();

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	irq_rt = rcu_dereference_protected(kvm->irq_routing,
					   lockdep_is_held(&kvm->irqfds.lock));
	irqfd_update(kvm, irqfd, irq_rt);

	events = file->f_op->poll(file, &irqfd->pt);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the POLLHUP
	 */
	fput(file);

	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	if (!IS_ERR(file))
		fput(file);

	kfree(irqfd);
	return ret;
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
/*
 * shut down any irqfds that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct _irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This rcu_assign_pointer is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush the workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 * It is paired with the synchronize_rcu done by the
			 * caller of that function.
			 */
			rcu_assign_pointer(irqfd->irq_entry, NULL);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}

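/*
 * For context, a minimal sketch of how userspace drives this path via the
 * KVM_IRQFD ioctl on a VM fd. The snippet is illustrative only and not part
 * of this file; it assumes a VM fd obtained with KVM_CREATE_VM, an arbitrary
 * GSI, and the uapi struct kvm_irqfd from <linux/kvm.h>:
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_irqfd irqfd = {
 *		.fd  = efd,
 *		.gsi = 5,			// guest interrupt line
 *	};
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);	// assign
 *
 *	uint64_t one = 1;
 *	write(efd, &one, sizeof(one));		// inject the interrupt
 *
 *	irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);	// tear down
 */
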
/*
 * This function is called as the kvm VM fd is being released. Shut down all
 * irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct _irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Change irq_routing and irqfd.
 * Caller must invoke synchronize_rcu afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm,
			    struct kvm_irq_routing_table *irq_rt)
{
	struct _irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	rcu_assign_pointer(kvm->irq_routing, irq_rt);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
		irqfd_update(kvm, irqfd, irq_rt);

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated single-thread
 * queue to prevent deadlock against flushing the normal work-queue.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

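/*
 * For context, a minimal sketch of the userspace side, illustrative only and
 * not part of this file. It assumes a VM fd from KVM_CREATE_VM, a hypothetical
 * doorbell address, and the uapi struct kvm_ioeventfd from <linux/kvm.h>; the
 * eventfd is signalled whenever the guest writes the matching 4-byte value:
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_ioeventfd ioeventfd = {
 *		.addr      = 0xfe001000,	// hypothetical MMIO doorbell
 *		.len       = 4,
 *		.fd        = efd,
 *		.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *		.datamatch = 0x1,
 *	};
 *	ioctl(vm_fd, KVM_IOEVENTFD, &ioeventfd);
 *
 *	// A read() on efd now blocks until the guest performs such a write.
 */
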
struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (!(addr == p->addr && len == p->length))
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len,
		const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down. We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr && _p->length == p->length &&
		    (_p->wildcard || p->wildcard ||
		     _p->datamatch == p->datamatch))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	struct _ioeventfd *p;
	struct eventfd_ctx *eventfd;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized */
	switch (args->len) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm->buses[bus_idx]->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	struct _ioeventfd *p, *tmp;
	struct eventfd_ctx *eventfd;
	int ret = -ENOENT;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd ||
		    p->addr != args->addr ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		kvm->buses[bus_idx]->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}