// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>
#include <linux/virtio_mmio.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/interface/hvm/ioreq.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"
MODULE_DESCRIPTION("Xen hypercall passthrough driver");
MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
		 "Maximum number of buffers per dm_op hypercall");
static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
		   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
		 "Maximum size of a dm_op hypercall buffer");
struct privcmd_data {
	domid_t domid;
};

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages);
static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_hypercall hypercall;
	long ret;

	/* Disallow arbitrary hypercalls if restricted */
	if (data->domid != DOMID_INVALID)
		return -EPERM;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}
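
/*
 * Illustrative userspace sketch (not part of this driver): issuing a raw
 * hypercall through /dev/xen/privcmd. Real tooling normally goes through
 * libxenctrl/xencall instead of open-coding this.
 *
 *	struct privcmd_hypercall call = {
 *		.op  = __HYPERVISOR_xen_version,
 *		.arg = { 0 },	// XENVER_version takes a NULL buffer
 *	};
 *	int fd = open("/dev/xen/privcmd", O_RDWR);
 *	long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 *
 * Buffers referenced from hypercall arguments must stay resident for the
 * duration of the call; that is what the companion privcmd-buf device and
 * the dm_op path below take care of for their users.
 */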
static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}
/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	int ret;
	void *pagedata;
	unsigned pageidx;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}
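
/*
 * Illustrative sketch (assumption: a hypothetical caller, not code from
 * this file) of how the helpers above combine: the caller owns the page
 * list and must free it on every path, including partial failure.
 *
 *	LIST_HEAD(pagelist);
 *	int rc = gather_array(&pagelist, nelem, sizeof(xen_pfn_t), uarr);
 *
 *	if (!rc && !list_empty(&pagelist))
 *		rc = traverse_pages(nelem, sizeof(xen_pfn_t), &pagelist,
 *				    my_fn, &my_state);	// my_fn/my_state hypothetical
 *	free_page_list(&pagelist);	// required on all paths
 */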
/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;

			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}
/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{
	void *pagedata;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	while (nelem) {
		int nr = (PAGE_SIZE/size);
		struct page *page;

		if (nr > nelem)
			nr = nelem;

		pos = pos->next;
		page = list_entry(pos, struct page, lru);
		pagedata = page_address(page);
		ret = (*fn)(pagedata, nr, state);
		if (ret)
			break;
		nelem -= nr;
	}

	return ret;
}
struct mmap_gfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};
static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}
static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for non-auto-translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
		return -EPERM;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	mmap_write_lock(mm);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = vma_lookup(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);

out_up:
	mmap_write_unlock(mm);

out:
	free_page_list(&pagelist);

	return rc;
}
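
/*
 * Illustrative sketch (hypothetical caller) of the legacy MMAP contract:
 * userspace first mmap()s the device fd to reserve a VA range, then
 * describes the foreign frames to install via privcmd_mmap_entry records.
 * The batch interfaces below are preferred on modern kernels.
 *
 *	struct privcmd_mmap_entry ent = {
 *		.va = (__u64)va, .mfn = mfn, .npages = 1,
 *	};
 *	struct privcmd_mmap cmd = { .num = 1, .dom = domid, .entry = &ent };
 *	ioctl(fd, IOCTL_PRIVCMD_MMAP, &cmd);
 */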
struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;

	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};
/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	BUG_ON(nr < 0);
	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += XEN_PAGE_SIZE * nr;
	st->index += nr / XEN_PFN_PER_PAGE;

	return 0;
}
static int mmap_return_error(int err, struct mmap_batch_state *st)
{
	int ret;

	if (st->version == 1) {
		if (err) {
			xen_pfn_t gfn;

			ret = get_user(gfn, st->user_gfn);
			if (ret < 0)
				return ret;
			/*
			 * V1 encodes the error codes in the 32bit top
			 * nibble of the gfn (with its known
			 * limitations vis-a-vis 64 bit callers).
			 */
			gfn |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
			return __put_user(gfn, st->user_gfn++);
		} else
			st->user_gfn++;
	} else { /* st->version == 2 */
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}
static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	int *errs = data;
	int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = mmap_return_error(errs[i], st);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = xen_alloc_unpopulated_pages(numpgs, pages);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kvfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}
static const struct vm_operations_struct privcmd_vm_ops;
static long privcmd_ioctl_mmap_batch(
	struct file *file, void __user *udata, int version)
{
	struct privcmd_data *data = file->private_data;
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != m.dom)
		return -EPERM;

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain        = m.dom;
	state.vma           = vma;
	state.va            = m.addr;
	state.index         = 0;
	state.global_error  = 0;
	state.version       = version;

	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	mmap_write_unlock(mm);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors, &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	mmap_write_unlock(mm);
	goto out;
}
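
/*
 * Illustrative userspace sketch (assumption: a hypothetical caller, not
 * code from this file) of the V2 batch protocol: the fd is mmap()ed to
 * reserve the VA range, then the ioctl fills it in; per-frame errors come
 * back through the err array.
 *
 *	xen_pfn_t gfns[NR] = { ... };	// guest frames of domid
 *	int errs[NR];
 *	void *va = mmap(NULL, NR * 4096, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *	struct privcmd_mmapbatch_v2 m = {
 *		.num = NR, .dom = domid,
 *		.addr = (__u64)va, .arr = gfns, .err = errs,
 *	};
 *	ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &m);
 *	// -ENOENT in errs[i] means "paged out"; that frame may be retried
 */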
static int lock_pages(
	struct privcmd_dm_op_buf kbufs[], unsigned int num,
	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
	unsigned int i, off = 0;

	for (i = 0; i < num; ) {
		unsigned int requested;
		int page_count;

		requested = DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE) - off;
		if (requested > nr_pages)
			return -ENOSPC;

		page_count = pin_user_pages_fast(
			(unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
			requested, FOLL_WRITE, pages);
		if (page_count <= 0)
			return page_count ? : -EFAULT;

		*pinned += page_count;
		nr_pages -= page_count;
		pages += page_count;

		off = (requested == page_count) ? 0 : off + page_count;
		i += !off;
	}

	return 0;
}
static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
	unpin_user_pages_dirty_lock(pages, nr_pages, true);
}
static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_dm_op kdata;
	struct privcmd_dm_op_buf *kbufs;
	unsigned int nr_pages = 0;
	struct page **pages = NULL;
	struct xen_dm_op_buf *xbufs = NULL;
	unsigned int i;
	long rc;
	unsigned int pinned = 0;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	if (kdata.num == 0)
		return 0;

	if (kdata.num > privcmd_dm_op_max_num)
		return -E2BIG;

	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
	if (!kbufs)
		return -ENOMEM;

	if (copy_from_user(kbufs, kdata.ubufs,
			   sizeof(*kbufs) * kdata.num)) {
		rc = -EFAULT;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
			rc = -E2BIG;
			goto out;
		}

		if (!access_ok(kbufs[i].uptr,
			       kbufs[i].size)) {
			rc = -EFAULT;
			goto out;
		}

		nr_pages += DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
	}

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto out;
	}

	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
	if (!xbufs) {
		rc = -ENOMEM;
		goto out;
	}

	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
	if (rc < 0)
		goto out;

	for (i = 0; i < kdata.num; i++) {
		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
		xbufs[i].size = kbufs[i].size;
	}

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
	xen_preemptible_hcall_end();

out:
	unlock_pages(pages, pinned);
	kfree(xbufs);
	kfree(pages);
	kfree(kbufs);

	return rc;
}
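
/*
 * Illustrative userspace sketch (hypothetical caller): a device model
 * issuing a dm_op with a single buffer. The xen_dm_op payload layout
 * comes from xen/interface/hvm/dm_op.h; fields elided here.
 *
 *	struct xen_dm_op op = { .op = XEN_DMOP_set_isa_irq_level, ... };
 *	struct privcmd_dm_op_buf buf = { .uptr = &op, .size = sizeof(op) };
 *	struct privcmd_dm_op kdata = { .dom = domid, .num = 1, .ubufs = &buf };
 *	ioctl(fd, IOCTL_PRIVCMD_DM_OP, &kdata);
 *
 * The buffers are pinned with pin_user_pages_fast() above precisely so
 * the hypervisor can read and write them during the (preemptible) call.
 */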
static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	domid_t dom;

	if (copy_from_user(&dom, udata, sizeof(dom)))
		return -EFAULT;

	/* Set restriction to the specified domain, or check it matches */
	if (data->domid == DOMID_INVALID)
		data->domid = dom;
	else if (data->domid != dom)
		return -EINVAL;

	return 0;
}
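
/*
 * Illustrative sketch (hypothetical caller): restricting an fd before
 * handing it to a deprivileged device-model process. Once set, the fd
 * only operates on that domain and raw hypercalls are refused.
 *
 *	domid_t domid = 5;	// arbitrary example domain
 *	ioctl(fd, IOCTL_PRIVCMD_RESTRICT, &domid);
 */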
static long privcmd_ioctl_mmap_resource(struct file *file,
				struct privcmd_mmap_resource __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct privcmd_mmap_resource kdata;
	xen_pfn_t *pfns = NULL;
	struct xen_mem_acquire_resource xdata = { };
	int rc;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	/* Both fields must be set or unset */
	if (!!kdata.addr != !!kdata.num)
		return -EINVAL;

	xdata.domid = kdata.dom;
	xdata.type = kdata.type;
	xdata.id = kdata.id;

	if (!kdata.addr && !kdata.num) {
		/* Query the size of the resource. */
		rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
		if (rc)
			return rc;
		return __put_user(xdata.nr_frames, &udata->num);
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, kdata.addr);
	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
		rc = -EINVAL;
		goto out;
	}

	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
	if (!pfns) {
		rc = -ENOMEM;
		goto out;
	}

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
		struct page **pages;
		unsigned int i;

		rc = alloc_empty_pages(vma, nr);
		if (rc < 0)
			goto out;

		pages = vma->vm_private_data;

		for (i = 0; i < kdata.num; i++) {
			xen_pfn_t pfn =
				page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
		}
	} else
		vma->vm_private_data = PRIV_VMA_LOCKED;

	xdata.frame = kdata.idx;
	xdata.nr_frames = kdata.num;
	set_xen_guest_handle(xdata.frame_list, pfns);

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
	xen_preemptible_hcall_end();

	if (rc)
		goto out;

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
	} else {
		unsigned int domid =
			(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
			DOMID_SELF : kdata.dom;
		int num, *errs = (int *)pfns;

		BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
		num = xen_remap_domain_mfn_array(vma,
						 kdata.addr & PAGE_MASK,
						 pfns, kdata.num, errs,
						 vma->vm_page_prot,
						 domid);
		if (num < 0)
			rc = num;
		else if (num != kdata.num) {
			unsigned int i;

			for (i = 0; i < num; i++) {
				rc = errs[i];
				if (rc < 0)
					break;
			}
		} else
			rc = 0;
	}

out:
	mmap_write_unlock(mm);
	kfree(pfns);

	return rc;
}
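
/*
 * Illustrative sketch (hypothetical caller) of the two-step protocol for
 * IOCTL_PRIVCMD_MMAP_RESOURCE: with addr/num both zero the ioctl only
 * reports the resource size; the caller then reserves VA and maps.
 *
 *	struct privcmd_mmap_resource r = {
 *		.dom = domid, .type = XENMEM_resource_ioreq_server, .id = id,
 *	};
 *	ioctl(fd, IOCTL_PRIVCMD_MMAP_RESOURCE, &r);	// fills r.num
 *	r.addr = (__u64)mmap(NULL, r.num * 4096, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED, fd, 0);
 *	ioctl(fd, IOCTL_PRIVCMD_MMAP_RESOURCE, &r);	// maps the frames
 */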
#ifdef CONFIG_XEN_PRIVCMD_EVENTFD
/* Irqfd support */
static struct workqueue_struct *irqfd_cleanup_wq;
static DEFINE_SPINLOCK(irqfds_lock);
DEFINE_STATIC_SRCU(irqfds_srcu);
static LIST_HEAD(irqfds_list);
struct privcmd_kernel_irqfd {
	struct xen_dm_op_buf xbufs;
	domid_t dom;
	bool error;
	struct eventfd_ctx *eventfd;
	struct work_struct shutdown;
	wait_queue_entry_t wait;
	struct list_head list;
	poll_table pt;
};
static void irqfd_deactivate(struct privcmd_kernel_irqfd *kirqfd)
{
	lockdep_assert_held(&irqfds_lock);

	list_del_init(&kirqfd->list);
	queue_work(irqfd_cleanup_wq, &kirqfd->shutdown);
}
static void irqfd_shutdown(struct work_struct *work)
{
	struct privcmd_kernel_irqfd *kirqfd =
		container_of(work, struct privcmd_kernel_irqfd, shutdown);
	u64 cnt;

	/* Make sure irqfd has been initialized in assign path */
	synchronize_srcu(&irqfds_srcu);

	eventfd_ctx_remove_wait_queue(kirqfd->eventfd, &kirqfd->wait, &cnt);
	eventfd_ctx_put(kirqfd->eventfd);
	kfree(kirqfd);
}
static void irqfd_inject(struct privcmd_kernel_irqfd *kirqfd)
{
	u64 cnt;
	long rc;

	eventfd_ctx_do_read(kirqfd->eventfd, &cnt);

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kirqfd->dom, 1, &kirqfd->xbufs);
	xen_preemptible_hcall_end();

	/* Don't repeat the error message for consecutive failures */
	if (rc && !kirqfd->error) {
		pr_err("Failed to configure irq for guest domain: %d\n",
		       kirqfd->dom);
	}

	kirqfd->error = rc;
}
static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
{
	struct privcmd_kernel_irqfd *kirqfd =
		container_of(wait, struct privcmd_kernel_irqfd, wait);
	__poll_t flags = key_to_poll(key);

	if (flags & EPOLLIN)
		irqfd_inject(kirqfd);

	if (flags & EPOLLHUP) {
		unsigned long flags;

		spin_lock_irqsave(&irqfds_lock, flags);
		irqfd_deactivate(kirqfd);
		spin_unlock_irqrestore(&irqfds_lock, flags);
	}

	return 0;
}
static void
irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
{
	struct privcmd_kernel_irqfd *kirqfd =
		container_of(pt, struct privcmd_kernel_irqfd, pt);

	add_wait_queue_priority(wqh, &kirqfd->wait);
}
static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
{
	struct privcmd_kernel_irqfd *kirqfd, *tmp;
	unsigned long flags;
	__poll_t events;
	struct fd f;
	void *dm_op;
	int ret, idx;

	kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL);
	if (!kirqfd)
		return -ENOMEM;
	dm_op = kirqfd + 1;

	if (copy_from_user(dm_op, u64_to_user_ptr(irqfd->dm_op), irqfd->size)) {
		ret = -EFAULT;
		goto error_kfree;
	}

	kirqfd->xbufs.size = irqfd->size;
	set_xen_guest_handle(kirqfd->xbufs.h, dm_op);
	kirqfd->dom = irqfd->dom;
	INIT_WORK(&kirqfd->shutdown, irqfd_shutdown);

	f = fdget(irqfd->fd);
	if (!f.file) {
		ret = -EBADF;
		goto error_kfree;
	}

	kirqfd->eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(kirqfd->eventfd)) {
		ret = PTR_ERR(kirqfd->eventfd);
		goto error_fd_put;
	}

	/*
	 * Install our own custom wake-up handling so we are notified via a
	 * callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);

	spin_lock_irqsave(&irqfds_lock, flags);

	list_for_each_entry(tmp, &irqfds_list, list) {
		if (kirqfd->eventfd == tmp->eventfd) {
			ret = -EBUSY;
			spin_unlock_irqrestore(&irqfds_lock, flags);
			goto error_eventfd;
		}
	}

	idx = srcu_read_lock(&irqfds_srcu);
	list_add_tail(&kirqfd->list, &irqfds_list);
	spin_unlock_irqrestore(&irqfds_lock, flags);

	/*
	 * Check if there was an event already pending on the eventfd before we
	 * registered, and trigger it as if we didn't miss it.
	 */
	events = vfs_poll(f.file, &kirqfd->pt);
	if (events & EPOLLIN)
		irqfd_inject(kirqfd);

	srcu_read_unlock(&irqfds_srcu, idx);

	/*
	 * Do not drop the file until the kirqfd is fully initialized, otherwise
	 * we might race against the EPOLLHUP.
	 */
	fdput(f);
	return 0;

error_eventfd:
	eventfd_ctx_put(kirqfd->eventfd);

error_fd_put:
	fdput(f);

error_kfree:
	kfree(kirqfd);
	return ret;
}
static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
{
	struct privcmd_kernel_irqfd *kirqfd;
	struct eventfd_ctx *eventfd;
	unsigned long flags;

	eventfd = eventfd_ctx_fdget(irqfd->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irqsave(&irqfds_lock, flags);

	list_for_each_entry(kirqfd, &irqfds_list, list) {
		if (kirqfd->eventfd == eventfd) {
			irqfd_deactivate(kirqfd);
			break;
		}
	}

	spin_unlock_irqrestore(&irqfds_lock, flags);

	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed so
	 * that we guarantee there will not be any more interrupts once this
	 * deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}
static long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_irqfd irqfd;

	if (copy_from_user(&irqfd, udata, sizeof(irqfd)))
		return -EFAULT;

	/* No other flags should be set */
	if (irqfd.flags & ~PRIVCMD_IRQFD_FLAG_DEASSIGN)
		return -EINVAL;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != irqfd.dom)
		return -EPERM;

	if (irqfd.flags & PRIVCMD_IRQFD_FLAG_DEASSIGN)
		return privcmd_irqfd_deassign(&irqfd);

	return privcmd_irqfd_assign(&irqfd);
}
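
/*
 * Illustrative sketch (hypothetical caller): wiring an eventfd so that
 * signalling it fires a pre-built dm_op into the guest without another
 * trip through userspace, e.g. from a vhost backend.
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct privcmd_irqfd cmd = {
 *		.dm_op = (__u64)&dm_op_data, .size = sizeof(dm_op_data),
 *		.fd = efd, .dom = domid,
 *	};
 *	ioctl(fd, IOCTL_PRIVCMD_IRQFD, &cmd);
 *	...
 *	eventfd_write(efd, 1);	// injects the interrupt
 */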
static int privcmd_irqfd_init(void)
{
	irqfd_cleanup_wq = alloc_workqueue("privcmd-irqfd-cleanup", 0, 0);
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}
static void privcmd_irqfd_exit(void)
{
	struct privcmd_kernel_irqfd *kirqfd, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&irqfds_lock, flags);

	list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
		irqfd_deactivate(kirqfd);

	spin_unlock_irqrestore(&irqfds_lock, flags);

	destroy_workqueue(irqfd_cleanup_wq);
}
/* Ioeventfd Support */
#define QUEUE_NOTIFY_VQ_MASK 0xFFFF

static DEFINE_MUTEX(ioreq_lock);
static LIST_HEAD(ioreq_list);
/* per-eventfd structure */
struct privcmd_kernel_ioeventfd {
	struct eventfd_ctx *eventfd;
	struct list_head list;
	u64 addr;
	unsigned int addr_len;
	unsigned int vq;
};
/* per-guest CPU / port structure */
struct ioreq_port {
	int vcpu;
	unsigned int port;
	struct privcmd_kernel_ioreq *kioreq;
};
/* per-guest structure */
struct privcmd_kernel_ioreq {
	domid_t dom;
	unsigned int vcpus;
	u64 uioreq;
	struct ioreq *ioreq;
	spinlock_t lock; /* Protects ioeventfds list */
	struct list_head ioeventfds;
	struct list_head list;
	struct ioreq_port ports[] __counted_by(vcpus);
};
static irqreturn_t ioeventfd_interrupt(int irq, void *dev_id)
{
	struct ioreq_port *port = dev_id;
	struct privcmd_kernel_ioreq *kioreq = port->kioreq;
	struct ioreq *ioreq = &kioreq->ioreq[port->vcpu];
	struct privcmd_kernel_ioeventfd *kioeventfd;
	unsigned int state = STATE_IOREQ_READY;

	if (ioreq->state != STATE_IOREQ_READY ||
	    ioreq->type != IOREQ_TYPE_COPY || ioreq->dir != IOREQ_WRITE)
		return IRQ_NONE;

	/*
	 * We need a barrier, smp_mb(), here to ensure reads are finished before
	 * `state` is updated. Since the lock implementation ensures that an
	 * appropriate barrier will be added anyway, we can avoid adding an
	 * explicit barrier here.
	 *
	 * Ideally we don't need to update `state` within the locks, but we do
	 * that here to avoid adding an explicit barrier.
	 */
	spin_lock(&kioreq->lock);
	ioreq->state = STATE_IOREQ_INPROCESS;

	list_for_each_entry(kioeventfd, &kioreq->ioeventfds, list) {
		if (ioreq->addr == kioeventfd->addr + VIRTIO_MMIO_QUEUE_NOTIFY &&
		    ioreq->size == kioeventfd->addr_len &&
		    (ioreq->data & QUEUE_NOTIFY_VQ_MASK) == kioeventfd->vq) {
			eventfd_signal(kioeventfd->eventfd);
			state = STATE_IORESP_READY;
			break;
		}
	}
	spin_unlock(&kioreq->lock);

	/*
	 * We need a barrier, smp_mb(), here to ensure writes are finished
	 * before `state` is updated. Since the lock implementation ensures that
	 * an appropriate barrier will be added anyway, we can avoid adding an
	 * explicit barrier here.
	 */
	ioreq->state = state;

	if (state == STATE_IORESP_READY) {
		notify_remote_via_evtchn(port->port);

		/* Clear the evtchn */
		xen_irq_lateeoi(irq, 0);
	}

	return IRQ_HANDLED;
}
static void ioreq_free(struct privcmd_kernel_ioreq *kioreq)
{
	struct ioreq_port *ports = kioreq->ports;
	int i;

	lockdep_assert_held(&ioreq_lock);

	list_del(&kioreq->list);

	for (i = kioreq->vcpus - 1; i >= 0; i--)
		unbind_from_irqhandler(irq_from_evtchn(ports[i].port), &ports[i]);

	kfree(kioreq);
}
static
struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
{
	struct privcmd_kernel_ioreq *kioreq;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct page **pages;
	unsigned int *ports;
	int ret, size, i;

	lockdep_assert_held(&ioreq_lock);

	size = struct_size(kioreq, ports, ioeventfd->vcpus);
	kioreq = kzalloc(size, GFP_KERNEL);
	if (!kioreq)
		return ERR_PTR(-ENOMEM);

	kioreq->dom = ioeventfd->dom;
	kioreq->vcpus = ioeventfd->vcpus;
	kioreq->uioreq = ioeventfd->ioreq;
	spin_lock_init(&kioreq->lock);
	INIT_LIST_HEAD(&kioreq->ioeventfds);

	/* The memory for ioreq server must have been mapped earlier */
	mmap_write_lock(mm);
	vma = find_vma(mm, (unsigned long)ioeventfd->ioreq);
	if (!vma) {
		pr_err("Failed to find vma for ioreq page!\n");
		mmap_write_unlock(mm);
		ret = -EFAULT;
		goto error_kfree;
	}

	pages = vma->vm_private_data;
	kioreq->ioreq = (struct ioreq *)(page_to_virt(pages[0]));
	mmap_write_unlock(mm);

	ports = memdup_array_user(u64_to_user_ptr(ioeventfd->ports),
				  kioreq->vcpus, sizeof(*ports));
	if (IS_ERR(ports)) {
		ret = PTR_ERR(ports);
		goto error_kfree;
	}

	for (i = 0; i < kioreq->vcpus; i++) {
		kioreq->ports[i].vcpu = i;
		kioreq->ports[i].port = ports[i];
		kioreq->ports[i].kioreq = kioreq;

		ret = bind_evtchn_to_irqhandler_lateeoi(ports[i],
				ioeventfd_interrupt, IRQF_SHARED, "ioeventfd",
				&kioreq->ports[i]);
		if (ret < 0)
			goto error_unbind;
	}

	kfree(ports);

	list_add_tail(&kioreq->list, &ioreq_list);

	return kioreq;

error_unbind:
	while (--i >= 0)
		unbind_from_irqhandler(irq_from_evtchn(ports[i]), &kioreq->ports[i]);

	kfree(ports);
error_kfree:
	kfree(kioreq);
	return ERR_PTR(ret);
}
static struct privcmd_kernel_ioreq *
get_ioreq(struct privcmd_ioeventfd *ioeventfd, struct eventfd_ctx *eventfd)
{
	struct privcmd_kernel_ioreq *kioreq;
	unsigned long flags;

	list_for_each_entry(kioreq, &ioreq_list, list) {
		struct privcmd_kernel_ioeventfd *kioeventfd;

		/*
		 * kioreq fields can be accessed here without a lock as they are
		 * never updated after being added to the ioreq_list.
		 */
		if (kioreq->uioreq != ioeventfd->ioreq) {
			continue;
		} else if (kioreq->dom != ioeventfd->dom ||
			   kioreq->vcpus != ioeventfd->vcpus) {
			pr_err("Invalid ioeventfd configuration mismatch, dom (%u vs %u), vcpus (%u vs %u)\n",
			       kioreq->dom, ioeventfd->dom, kioreq->vcpus,
			       ioeventfd->vcpus);
			return ERR_PTR(-EINVAL);
		}

		/* Look for a duplicate eventfd for the same guest */
		spin_lock_irqsave(&kioreq->lock, flags);
		list_for_each_entry(kioeventfd, &kioreq->ioeventfds, list) {
			if (eventfd == kioeventfd->eventfd) {
				spin_unlock_irqrestore(&kioreq->lock, flags);
				return ERR_PTR(-EBUSY);
			}
		}
		spin_unlock_irqrestore(&kioreq->lock, flags);

		return kioreq;
	}

	/* Matching kioreq isn't found, allocate a new one */
	return alloc_ioreq(ioeventfd);
}
static void ioeventfd_free(struct privcmd_kernel_ioeventfd *kioeventfd)
{
	list_del(&kioeventfd->list);
	eventfd_ctx_put(kioeventfd->eventfd);
	kfree(kioeventfd);
}
static int privcmd_ioeventfd_assign(struct privcmd_ioeventfd *ioeventfd)
{
	struct privcmd_kernel_ioeventfd *kioeventfd;
	struct privcmd_kernel_ioreq *kioreq;
	unsigned long flags;
	struct fd f;
	int ret;

	/* Check for range overflow */
	if (ioeventfd->addr + ioeventfd->addr_len < ioeventfd->addr)
		return -EINVAL;

	/* Vhost requires us to support length 1, 2, 4, and 8 */
	if (!(ioeventfd->addr_len == 1 || ioeventfd->addr_len == 2 ||
	      ioeventfd->addr_len == 4 || ioeventfd->addr_len == 8))
		return -EINVAL;

	/* 4096 vcpus limit enough ? */
	if (!ioeventfd->vcpus || ioeventfd->vcpus > 4096)
		return -EINVAL;

	kioeventfd = kzalloc(sizeof(*kioeventfd), GFP_KERNEL);
	if (!kioeventfd)
		return -ENOMEM;

	f = fdget(ioeventfd->event_fd);
	if (!f.file) {
		ret = -EBADF;
		goto error_kfree;
	}

	kioeventfd->eventfd = eventfd_ctx_fileget(f.file);
	fdput(f);

	if (IS_ERR(kioeventfd->eventfd)) {
		ret = PTR_ERR(kioeventfd->eventfd);
		goto error_kfree;
	}

	kioeventfd->addr = ioeventfd->addr;
	kioeventfd->addr_len = ioeventfd->addr_len;
	kioeventfd->vq = ioeventfd->vq;

	mutex_lock(&ioreq_lock);
	kioreq = get_ioreq(ioeventfd, kioeventfd->eventfd);
	if (IS_ERR(kioreq)) {
		mutex_unlock(&ioreq_lock);
		ret = PTR_ERR(kioreq);
		goto error_eventfd;
	}

	spin_lock_irqsave(&kioreq->lock, flags);
	list_add_tail(&kioeventfd->list, &kioreq->ioeventfds);
	spin_unlock_irqrestore(&kioreq->lock, flags);

	mutex_unlock(&ioreq_lock);

	return 0;

error_eventfd:
	eventfd_ctx_put(kioeventfd->eventfd);

error_kfree:
	kfree(kioeventfd);
	return ret;
}
static int privcmd_ioeventfd_deassign(struct privcmd_ioeventfd *ioeventfd)
{
	struct privcmd_kernel_ioreq *kioreq, *tkioreq;
	struct eventfd_ctx *eventfd;
	unsigned long flags;
	int ret = 0;

	eventfd = eventfd_ctx_fdget(ioeventfd->event_fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&ioreq_lock);
	list_for_each_entry_safe(kioreq, tkioreq, &ioreq_list, list) {
		struct privcmd_kernel_ioeventfd *kioeventfd, *tmp;
		/*
		 * kioreq fields can be accessed here without a lock as they are
		 * never updated after being added to the ioreq_list.
		 */
		if (kioreq->dom != ioeventfd->dom ||
		    kioreq->uioreq != ioeventfd->ioreq ||
		    kioreq->vcpus != ioeventfd->vcpus)
			continue;

		spin_lock_irqsave(&kioreq->lock, flags);
		list_for_each_entry_safe(kioeventfd, tmp, &kioreq->ioeventfds, list) {
			if (eventfd == kioeventfd->eventfd) {
				ioeventfd_free(kioeventfd);
				spin_unlock_irqrestore(&kioreq->lock, flags);

				if (list_empty(&kioreq->ioeventfds))
					ioreq_free(kioreq);
				goto unlock;
			}
		}
		spin_unlock_irqrestore(&kioreq->lock, flags);
		break;
	}

	pr_err("Ioeventfd isn't already assigned, dom: %u, addr: %llu\n",
	       ioeventfd->dom, ioeventfd->addr);
	ret = -ENODEV;

unlock:
	mutex_unlock(&ioreq_lock);
	eventfd_ctx_put(eventfd);

	return ret;
}
static long privcmd_ioctl_ioeventfd(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_ioeventfd ioeventfd;

	if (copy_from_user(&ioeventfd, udata, sizeof(ioeventfd)))
		return -EFAULT;

	/* No other flags should be set */
	if (ioeventfd.flags & ~PRIVCMD_IOEVENTFD_FLAG_DEASSIGN)
		return -EINVAL;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != ioeventfd.dom)
		return -EPERM;

	if (ioeventfd.flags & PRIVCMD_IOEVENTFD_FLAG_DEASSIGN)
		return privcmd_ioeventfd_deassign(&ioeventfd);

	return privcmd_ioeventfd_assign(&ioeventfd);
}
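
/*
 * Illustrative sketch (hypothetical caller): registering an eventfd that
 * fires when the guest writes a virtqueue index to the virtio-mmio
 * QUEUE_NOTIFY register of a device at `mmio_base`. The ioreq page must
 * already be mapped, e.g. via IOCTL_PRIVCMD_MMAP_RESOURCE above.
 *
 *	struct privcmd_ioeventfd cmd = {
 *		.ioreq = (__u64)ioreq_va, .ports = (__u64)evtchn_ports,
 *		.addr = mmio_base, .addr_len = 4,
 *		.event_fd = efd, .vcpus = nr_vcpus, .vq = 0, .dom = domid,
 *	};
 *	ioctl(fd, IOCTL_PRIVCMD_IOEVENTFD, &cmd);
 */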
static void privcmd_ioeventfd_exit(void)
{
	struct privcmd_kernel_ioreq *kioreq, *tmp;
	unsigned long flags;

	mutex_lock(&ioreq_lock);
	list_for_each_entry_safe(kioreq, tmp, &ioreq_list, list) {
		struct privcmd_kernel_ioeventfd *kioeventfd, *tmp;

		spin_lock_irqsave(&kioreq->lock, flags);
		list_for_each_entry_safe(kioeventfd, tmp, &kioreq->ioeventfds, list)
			ioeventfd_free(kioeventfd);
		spin_unlock_irqrestore(&kioreq->lock, flags);

		ioreq_free(kioreq);
	}
	mutex_unlock(&ioreq_lock);
}
#else /* CONFIG_XEN_PRIVCMD_EVENTFD */
static inline long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
{
	return -EOPNOTSUPP;
}

static inline int privcmd_irqfd_init(void)
{
	return 0;
}

static inline void privcmd_irqfd_exit(void)
{
}

static inline long privcmd_ioctl_ioeventfd(struct file *file, void __user *udata)
{
	return -EOPNOTSUPP;
}

static inline void privcmd_ioeventfd_exit(void)
{
}
#endif /* CONFIG_XEN_PRIVCMD_EVENTFD */
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOTTY;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
		break;

	case IOCTL_PRIVCMD_DM_OP:
		ret = privcmd_ioctl_dm_op(file, udata);
		break;

	case IOCTL_PRIVCMD_RESTRICT:
		ret = privcmd_ioctl_restrict(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP_RESOURCE:
		ret = privcmd_ioctl_mmap_resource(file, udata);
		break;

	case IOCTL_PRIVCMD_IRQFD:
		ret = privcmd_ioctl_irqfd(file, udata);
		break;

	case IOCTL_PRIVCMD_IOEVENTFD:
		ret = privcmd_ioctl_ioeventfd(file, udata);
		break;

	default:
		break;
	}

	return ret;
}
static int privcmd_open(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* DOMID_INVALID implies no restriction */
	data->domid = DOMID_INVALID;

	file->private_data = data;
	return 0;
}
static int privcmd_release(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = file->private_data;

	kfree(data);
	return 0;
}
static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = vma_pages(vma);
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		xen_free_unpopulated_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kvfree(pages);
}
static vm_fault_t privcmd_fault(struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
	       vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}
static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};
static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}
/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can then be retried until success.
 */
static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
{
	return pte_none(ptep_get(pte)) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
	           struct vm_area_struct *vma,
	           unsigned long addr,
	           unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}
const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.open = privcmd_open,
	.release = privcmd_release,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);
static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};
static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}

	err = misc_register(&xen_privcmdbuf_dev);
	if (err != 0) {
		pr_err("Could not register Xen hypercall-buf device\n");
		goto err_privcmdbuf;
	}

	err = privcmd_irqfd_init();
	if (err != 0) {
		pr_err("irqfd init failed\n");
		goto err_irqfd;
	}

	return 0;

err_irqfd:
	misc_deregister(&xen_privcmdbuf_dev);
err_privcmdbuf:
	misc_deregister(&privcmd_dev);
	return err;
}
static void __exit privcmd_exit(void)
{
	privcmd_ioeventfd_exit();
	privcmd_irqfd_exit();
	misc_deregister(&privcmd_dev);
	misc_deregister(&xen_privcmdbuf_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);