// SPDX-License-Identifier: GPL-2.0-only
/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell. All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;

bool __attribute__((weak))
kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
{
	return true;
}

static void
irqfd_inject(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI. We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	struct kvm *kvm;
	struct kvm_kernel_irqfd *irqfd;
	int idx;

	resampler = container_of(kian,
			struct kvm_kernel_irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}

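/*
 * Illustrative userspace flow for a resampling irqfd (a minimal sketch, not
 * part of this file; "vm_fd" and the GSI value are assumptions for the
 * example). The VMM registers two eventfds: "irq_efd" asserts the
 * level-triggered interrupt, and "resample_efd" is signalled on guest EOI so
 * the device model can re-assert if the line is still high:
 *
 *	struct kvm_irqfd args = { 0 };
 *
 *	args.fd         = irq_efd;
 *	args.resamplefd = resample_efd;
 *	args.gsi        = 24;
 *	args.flags      = KVM_IRQFD_FLAG_RESAMPLE;
 *	ioctl(vm_fd, KVM_IRQFD, &args);
 *
 * A write to irq_efd asserts GSI 24; irqfd_resampler_ack() above de-asserts
 * it on guest EOI and signals resample_efd.
 */
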
static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	struct kvm *kvm = irqfd->kvm;
	u64 cnt;

	/* Make sure irqfd has been initialized in assign path. */
	synchronize_srcu(&kvm->irq_srcu);

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}

/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
	return list_empty(&irqfd->list) ? false : true;
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);
	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

int __attribute__((weak)) kvm_arch_set_irq_inatomic(
				struct kvm_kernel_irq_routing_entry *irq,
				struct kvm *kvm, int irq_source_id,
				int level, bool line_status)
{
	return -EWOULDBLOCK;
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(wait, struct kvm_kernel_irqfd, wait);
	__poll_t flags = key_to_poll(key);
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;
	int ret = 0;

	if (flags & EPOLLIN) {
		u64 cnt;
		eventfd_ctx_do_read(irqfd->eventfd, &cnt);

		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (kvm_arch_set_irq_inatomic(&irq, kvm,
					      KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					      false) == -EWOULDBLOCK)
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
		ret = 1;
	}

	if (flags & EPOLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long iflags;

		spin_lock_irqsave(&kvm->irqfds.lock, iflags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue. If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
	}

	return ret;
}

static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(pt, struct kvm_kernel_irqfd, pt);
	add_wait_queue_priority(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	e = entries;
	if (n_entries == 1)
		irqfd->irq_entry = *e;
	else
		irqfd->irq_entry.type = 0;

	write_seqcount_end(&irqfd->irq_entry_sc);
}

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
				struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
				struct irq_bypass_consumer *cons)
{
}

int __attribute__((weak)) kvm_arch_update_irqfd_routing(
				struct kvm *kvm, unsigned int host_irq,
				uint32_t guest_irq, bool set)
{
	return 0;
}
#endif

static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	__poll_t events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	if (!kvm_arch_irqfd_allowed(kvm, args))
		return -EINVAL;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL_ACCOUNT);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct kvm_kernel_irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler),
					    GFP_KERNEL_ACCOUNT);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = vfs_poll(f.file, &irqfd->pt);

	if (events & EPOLLIN)
		schedule_work(&irqfd->inject);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	if (kvm_arch_has_irq_bypass()) {
		irqfd->consumer.token = (void *)irqfd->eventfd;
		irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
		irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
		irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
		irqfd->consumer.start = kvm_arch_irq_bypass_start;
		ret = irq_bypass_register_consumer(&irqfd->consumer);
		if (ret)
			pr_info("irq bypass consumer (token %p) registration fails: %d\n",
				irqfd->consumer.token, ret);
	}
#endif

	srcu_read_unlock(&kvm->irq_srcu, idx);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the EPOLLHUP
	 */
	fdput(f);
	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}

bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
	struct kvm_irq_ack_notifier *kian;

	hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
				 link)
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * shutdown any irqfd's that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}

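/*
 * Illustrative userspace usage of the ioctl above (a minimal sketch, not
 * part of this file; "vm_fd" and the GSI value are assumptions for the
 * example):
 *
 *	struct kvm_irqfd args = { 0 };
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	args.fd  = efd;
 *	args.gsi = 25;
 *	ioctl(vm_fd, KVM_IRQFD, &args);			// assign
 *
 *	args.flags = KVM_IRQFD_FLAG_DEASSIGN;
 *	ioctl(vm_fd, KVM_IRQFD, &args);			// deassign
 *
 * A write to "efd" (e.g. from a VFIO or vhost producer) then injects GSI 25
 * into the guest without bouncing through the VMM.
 */
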
/*
 * This function is called as the kvm VM fd is being released. Shutdown all
 * irqfds that still remain open
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
		irqfd_update(kvm, irqfd);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
		if (irqfd->producer) {
			int ret = kvm_arch_update_irqfd_routing(
					irqfd->kvm, irqfd->producer->irq,
					irqfd->gsi, 1);
			WARN_ON(ret);
		}
#endif
	}

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated
 * queue to ease flushing work items when a VM exits.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */
	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1: _val = *(u8 *)val; break;
	case 2: _val = *(u16 *)val; break;
	case 4: _val = *(u32 *)val; break;
	case 8: _val = *(u64 *)val; break;
	default: return false;
	}

	return _val == p->datamatch;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down. We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
				enum kvm_bus bus_idx,
				struct kvm_ioeventfd *args)
{
	struct eventfd_ctx *eventfd;
	struct _ioeventfd *p;
	int ret;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL_ACCOUNT);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);
fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_ioeventfd *args)
{
	struct _ioeventfd        *p, *tmp;
	struct eventfd_ctx       *eventfd;
	struct kvm_io_bus        *bus;
	int                       ret = -ENOENT;
	bool                      wildcard;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd  ||
		    p->addr != args->addr  ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		bus = kvm_get_bus(kvm, bus_idx);
		if (bus)
			bus->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

	if (!args->len && bus_idx == KVM_MMIO_BUS)
		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

	return ret;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0: case 1: case 2: case 4: case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
	if (ret)
		goto fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && bus_idx == KVM_MMIO_BUS) {
		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
		if (ret < 0)
			goto fast_fail;
	}

	return 0;

fast_fail:
	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
	return ret;
}

int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}
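
/*
 * Illustrative userspace usage of KVM_IOEVENTFD (a minimal sketch, not part
 * of this file; "vm_fd" and the doorbell address are assumptions for the
 * example). The VMM asks KVM to signal "efd" whenever the guest writes the
 * value 1 to a 4-byte MMIO doorbell, so the device model can poll the
 * eventfd instead of taking a heavyweight exit to userspace:
 *
 *	struct kvm_ioeventfd args = { 0 };
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	args.addr      = 0xfe003000;		// guest-physical doorbell
 *	args.len       = 4;
 *	args.fd        = efd;
 *	args.datamatch = 1;
 *	args.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH;
 *	ioctl(vm_fd, KVM_IOEVENTFD, &args);
 *
 * Passing len = 0 (without DATAMATCH) matches any write to the address and,
 * for MMIO, additionally registers the device on KVM_FAST_MMIO_BUS as
 * handled in kvm_assign_ioeventfd() above.
 */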