// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/android/staging/vsoc.c
 *
 * Android Virtual System on a Chip (VSoC) driver
 *
 * Copyright (C) 2017 Google, Inc.
 *
 * Based on drivers/char/kvm_ivshmem.c - driver for KVM Inter-VM shared memory
 *
 * Based on cirrusfb.c and 8139cp.c:
 *   Copyright 1999-2001 Jeff Garzik
 *   Copyright 2001-2004 Jeff Garzik
 */
#include <linux/dma-mapping.h>
#include <linux/freezer.h>
#include <linux/futex.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include "uapi/vsoc_shm.h"

#define VSOC_DEV_NAME "vsoc"
/*
 * Description of the ivshmem-doorbell PCI device used by QEmu. These
 * constants follow docs/specs/ivshmem-spec.txt, which can be found in
 * the QEmu repository. This was last reconciled with the version that
 * came out in QEmu 2.8.
 */

/*
 * These constants are the KVM Inter-VM shared memory device
 * register offsets.
 */
enum {
	INTR_MASK = 0x00,	/* Interrupt Mask */
	INTR_STATUS = 0x04,	/* Interrupt Status */
	IV_POSITION = 0x08,	/* VM ID */
	DOORBELL = 0x0c,	/* Doorbell */
};

static const int REGISTER_BAR;	/* Equal to 0 */
static const int MAX_REGISTER_BAR_LEN = 0x100;
/*
 * The MSI-x BAR is not used directly.
 *
 * static const int MSI_X_BAR = 1;
 */
static const int SHARED_MEMORY_BAR = 2;
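
/*
 * Per-region state kept by the driver. Each shared-memory region gets its
 * own device node, MSI-X vector and wait queues: interrupt_wait_queue is
 * woken from the doorbell interrupt handler, futex_wait_queue backs the
 * VSOC_COND_WAIT/VSOC_COND_WAKE ioctls, and the two signalled flags point
 * into shared memory at the offsets published in the region's signal tables.
 */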
struct vsoc_region_data {
	char name[VSOC_DEVICE_NAME_SZ + 1];
	wait_queue_head_t interrupt_wait_queue;
	/* TODO(b/73664181): Use multiple futex wait queues */
	wait_queue_head_t futex_wait_queue;
	/* Flag indicating that an interrupt has been signalled by the host. */
	atomic_t *incoming_signalled;
	/* Flag indicating the guest has signalled the host. */
	atomic_t *outgoing_signalled;
	bool irq_requested;
	bool device_created;
};
struct vsoc_device {
	/* Kernel virtual address of REGISTER_BAR. */
	void __iomem *regs;
	/* Physical address of SHARED_MEMORY_BAR. */
	phys_addr_t shm_phys_start;
	/* Kernel virtual address of SHARED_MEMORY_BAR. */
	void __iomem *kernel_mapped_shm;
	/* Size of the entire shared memory window in bytes. */
	size_t shm_size;
	/*
	 * Pointer to the virtual address of the shared memory layout
	 * structure. This is probably identical to kernel_mapped_shm, but
	 * saving this here saves a lot of annoying casts.
	 */
	struct vsoc_shm_layout_descriptor *layout;
	/*
	 * Points to a table of region descriptors in the kernel's virtual
	 * address space. Calculated from
	 * vsoc_shm_layout_descriptor.vsoc_region_desc_offset
	 */
	struct vsoc_device_region *regions;
	/* Head of a list of permissions that have been granted. */
	struct list_head permissions;
	struct pci_dev *dev;
	/* Per-region (and therefore per-interrupt) information. */
	struct vsoc_region_data *regions_data;
	/*
	 * Table of msi-x entries. This has to be separated from struct
	 * vsoc_region_data because the kernel deals with them as an array.
	 */
	struct msix_entry *msix_entries;
	/* Mutex that protects the permission list */
	struct mutex mtx;
	/* Major number assigned by the kernel */
	int major;
	/* Character device assigned by the kernel */
	struct cdev cdev;
	/* Device class assigned by the kernel */
	struct class *class;
	/*
	 * Flags that indicate what we've initialized. These are used to do an
	 * orderly cleanup of the device.
	 */
	bool enabled_device;
	bool requested_regions;
	bool cdev_added;
	bool class_added;
	bool msix_enabled;
};

static struct vsoc_device vsoc_dev;
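
/*
 * Note: all driver state lives in the single static vsoc_dev instance above,
 * so only one ivshmem-doorbell PCI function per guest is supported.
 */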
/*
 * TODO(ghartman): Add a /sys filesystem entry that summarizes the permissions.
 */

struct fd_scoped_permission_node {
	struct fd_scoped_permission permission;
	struct list_head list;
};

struct vsoc_private_data {
	struct fd_scoped_permission_node *fd_scoped_permission_node;
};
static long vsoc_ioctl(struct file *, unsigned int, unsigned long);
static int vsoc_mmap(struct file *, struct vm_area_struct *);
static int vsoc_open(struct inode *, struct file *);
static int vsoc_release(struct inode *, struct file *);
static ssize_t vsoc_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t vsoc_write(struct file *, const char __user *, size_t, loff_t *);
static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin);
static int
do_create_fd_scoped_permission(struct vsoc_device_region *region_p,
			       struct fd_scoped_permission_node *np,
			       struct fd_scoped_permission_arg __user *arg);
static void
do_destroy_fd_scoped_permission(struct vsoc_device_region *owner_region_p,
				struct fd_scoped_permission *perm);
static long do_vsoc_describe_region(struct file *,
				    struct vsoc_device_region __user *);
static ssize_t vsoc_get_area(struct file *filp, __u32 *perm_off);
/**
 * Validate arguments on entry points to the driver.
 */
inline int vsoc_validate_inode(struct inode *inode)
{
	if (iminor(inode) >= vsoc_dev.layout->region_count) {
		dev_err(&vsoc_dev.dev->dev,
			"describe_region: invalid region %d\n", iminor(inode));
		return -ENODEV;
	}
	return 0;
}

inline int vsoc_validate_filep(struct file *filp)
{
	int ret = vsoc_validate_inode(file_inode(filp));

	if (ret)
		return ret;
	if (!filp->private_data) {
		dev_err(&vsoc_dev.dev->dev,
			"No private data on fd, region %d\n",
			iminor(file_inode(filp)));
		return -EBADFD;
	}
	return 0;
}
/* Converts from shared memory offset to virtual address */
static inline void *shm_off_to_virtual_addr(__u32 offset)
{
	return (void __force *)vsoc_dev.kernel_mapped_shm + offset;
}

/* Converts from shared memory offset to physical address */
static inline phys_addr_t shm_off_to_phys_addr(__u32 offset)
{
	return vsoc_dev.shm_phys_start + offset;
}

/**
 * Convenience functions to obtain the region from the inode or file.
 * Dangerous to call before validating the inode/file.
 */
static
inline struct vsoc_device_region *vsoc_region_from_inode(struct inode *inode)
{
	return &vsoc_dev.regions[iminor(inode)];
}

static
inline struct vsoc_device_region *vsoc_region_from_filep(struct file *inode)
{
	return vsoc_region_from_inode(file_inode(inode));
}

static inline uint32_t vsoc_device_region_size(struct vsoc_device_region *r)
{
	return r->region_end_offset - r->region_begin_offset;
}
static const struct file_operations vsoc_ops = {
	.owner = THIS_MODULE,
	.open = vsoc_open,
	.mmap = vsoc_mmap,
	.read = vsoc_read,
	.unlocked_ioctl = vsoc_ioctl,
	.compat_ioctl = vsoc_ioctl,
	.write = vsoc_write,
	.llseek = vsoc_lseek,
	.release = vsoc_release,
};
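
/*
 * 0x1af4 is the Red Hat/Qumranet PCI vendor ID and 0x1110 is the ivshmem
 * ("Inter-VM shared memory") device ID that QEmu assigns to the
 * ivshmem-doorbell device this driver binds to.
 */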
static struct pci_device_id vsoc_id_table[] = {
	{0x1af4, 0x1110, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0},
};

MODULE_DEVICE_TABLE(pci, vsoc_id_table);
static void vsoc_remove_device(struct pci_dev *pdev);
static int vsoc_probe_device(struct pci_dev *pdev,
			     const struct pci_device_id *ent);

static struct pci_driver vsoc_pci_driver = {
	.name = VSOC_DEV_NAME,
	.id_table = vsoc_id_table,
	.probe = vsoc_probe_device,
	.remove = vsoc_remove_device,
};
static int
do_create_fd_scoped_permission(struct vsoc_device_region *region_p,
			       struct fd_scoped_permission_node *np,
			       struct fd_scoped_permission_arg __user *arg)
{
	struct file *managed_filp;
	s32 managed_fd;
	atomic_t *owner_ptr = NULL;
	struct vsoc_device_region *managed_region_p;

	/* Copy only the permission itself, not the whole node. */
	if (copy_from_user(&np->permission,
			   &arg->perm, sizeof(np->permission)) ||
	    copy_from_user(&managed_fd,
			   &arg->managed_region_fd, sizeof(managed_fd))) {
		return -EFAULT;
	}
	managed_filp = fdget(managed_fd).file;
	/* Check that it's a valid fd, */
	if (!managed_filp || vsoc_validate_filep(managed_filp))
		return -EPERM;
	/* EEXIST if the given fd already has a permission. */
	if (((struct vsoc_private_data *)managed_filp->private_data)->
	    fd_scoped_permission_node)
		return -EEXIST;
	managed_region_p = vsoc_region_from_filep(managed_filp);
	/* Check that the provided region is managed by this one */
	if (&vsoc_dev.regions[managed_region_p->managed_by] != region_p)
		return -EPERM;
	/* The area must be well formed and have non-zero size */
	if (np->permission.begin_offset >= np->permission.end_offset)
		return -EINVAL;
	/* The area must fit in the memory window */
	if (np->permission.end_offset >
	    vsoc_device_region_size(managed_region_p))
		return -ERANGE;
	/* The area must be in the region data section */
	if (np->permission.begin_offset <
	    managed_region_p->offset_of_region_data)
		return -ERANGE;
	/* The area must be page aligned */
	if (!PAGE_ALIGNED(np->permission.begin_offset) ||
	    !PAGE_ALIGNED(np->permission.end_offset))
		return -EINVAL;
	/* Owner offset must be naturally aligned in the window */
	if (np->permission.owner_offset &
	    (sizeof(np->permission.owner_offset) - 1))
		return -EINVAL;
	/* The owner flag must reside in the owner memory */
	if (np->permission.owner_offset + sizeof(np->permission.owner_offset) >
	    vsoc_device_region_size(region_p))
		return -ERANGE;
	/* The owner flag must reside in the data section */
	if (np->permission.owner_offset < region_p->offset_of_region_data)
		return -EINVAL;
	/* The owner value must change to claim the memory */
	if (np->permission.owned_value == VSOC_REGION_FREE)
		return -EINVAL;
	owner_ptr =
	    (atomic_t *)shm_off_to_virtual_addr(region_p->region_begin_offset +
						np->permission.owner_offset);
	/* We've already verified that this is in the shared memory window, so
	 * it should be safe to write to this address.
	 */
	if (atomic_cmpxchg(owner_ptr,
			   VSOC_REGION_FREE,
			   np->permission.owned_value) != VSOC_REGION_FREE) {
		return -EBUSY;
	}
	((struct vsoc_private_data *)managed_filp->private_data)->
	    fd_scoped_permission_node = np;
	/* The file offset needs to be adjusted if the calling
	 * process did any read/write operations on the fd
	 * before creating the permission.
	 */
	if (managed_filp->f_pos) {
		if (managed_filp->f_pos > np->permission.end_offset) {
			/* If the offset is beyond the permission end, set it
			 * to the end.
			 */
			managed_filp->f_pos = np->permission.end_offset;
		} else {
			/* If the offset is within the permission interval
			 * keep it there otherwise reset it to zero.
			 */
			if (managed_filp->f_pos < np->permission.begin_offset) {
				managed_filp->f_pos = 0;
			} else {
				managed_filp->f_pos -=
				    np->permission.begin_offset;
			}
		}
	}
	return 0;
}
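
/*
 * Releases an fd-scoped permission: the owner word in shared memory is
 * returned to VSOC_REGION_FREE and the node is unlinked from the device-wide
 * permission list under vsoc_dev.mtx before being freed.
 */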
static void
do_destroy_fd_scoped_permission_node(struct vsoc_device_region *owner_region_p,
				     struct fd_scoped_permission_node *node)
{
	if (node) {
		do_destroy_fd_scoped_permission(owner_region_p,
						&node->permission);
		mutex_lock(&vsoc_dev.mtx);
		list_del(&node->list);
		mutex_unlock(&vsoc_dev.mtx);
		kfree(node);
	}
}
static void
do_destroy_fd_scoped_permission(struct vsoc_device_region *owner_region_p,
				struct fd_scoped_permission *perm)
{
	atomic_t *owner_ptr = NULL;
	int prev = 0;

	if (!perm)
		return;
	owner_ptr = (atomic_t *)shm_off_to_virtual_addr
		(owner_region_p->region_begin_offset + perm->owner_offset);
	prev = atomic_xchg(owner_ptr, VSOC_REGION_FREE);
	if (prev != perm->owned_value)
		dev_err(&vsoc_dev.dev->dev,
			"%x-%x: owner (%s) %x: expected to be %x was %x",
			perm->begin_offset, perm->end_offset,
			owner_region_p->device_name, perm->owner_offset,
			perm->owned_value, prev);
}
static long do_vsoc_describe_region(struct file *filp,
				    struct vsoc_device_region __user *dest)
{
	struct vsoc_device_region *region_p;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	region_p = vsoc_region_from_filep(filp);
	if (copy_to_user(dest, region_p, sizeof(*region_p)))
		return -EFAULT;
	return 0;
}
/**
 * Implements the inner logic of cond_wait. Copies to and from userspace are
 * done in the helper function below.
 */
static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg)
{
	DEFINE_WAIT(wait);
	u32 region_number = iminor(file_inode(filp));
	struct vsoc_region_data *data = vsoc_dev.regions_data + region_number;
	struct hrtimer_sleeper timeout, *to = NULL;
	int ret = 0;
	struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
	atomic_t *address = NULL;
	ktime_t wake_time;

	/* Ensure that the offset is aligned */
	if (arg->offset & (sizeof(uint32_t) - 1))
		return -EADDRNOTAVAIL;
	/* Ensure that the offset is within shared memory */
	if (((uint64_t)arg->offset) + region_p->region_begin_offset +
	    sizeof(uint32_t) > region_p->region_end_offset)
		return -E2BIG;
	address = shm_off_to_virtual_addr(region_p->region_begin_offset +
					  arg->offset);
	/* Ensure that the type of wait is valid */
	switch (arg->wait_type) {
	case VSOC_WAIT_IF_EQUAL:
		break;
	case VSOC_WAIT_IF_EQUAL_TIMEOUT:
		to = &timeout;
		break;
	default:
		return -EINVAL;
	}
	if (to) {
		/* Copy the user-supplied timesec into the kernel structure.
		 * We do things this way to flatten differences between 32 bit
		 * and 64 bit timespecs.
		 */
		if (arg->wake_time_nsec >= NSEC_PER_SEC)
			return -EINVAL;
		wake_time = ktime_set(arg->wake_time_sec, arg->wake_time_nsec);
		hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_set_expires_range_ns(&to->timer, wake_time,
					     current->timer_slack_ns);
		hrtimer_init_sleeper(to, current);
	}
	while (1) {
		prepare_to_wait(&data->futex_wait_queue, &wait,
				TASK_INTERRUPTIBLE);
		/*
		 * Check the sentinel value after prepare_to_wait. If the value
		 * changes after this check the writer will call signal,
		 * changing the task state from INTERRUPTIBLE to RUNNING. That
		 * will ensure that schedule() will eventually schedule this
		 * task.
		 */
		if (atomic_read(address) != arg->value) {
			ret = 0;
			break;
		}
		if (to) {
			hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
			if (likely(to->task))
				freezable_schedule();
			hrtimer_cancel(&to->timer);
			if (!to->task) {
				ret = -ETIMEDOUT;
				break;
			}
		} else {
			freezable_schedule();
		}
		/* Count the number of times that we woke up. This is useful
		 * for debugging.
		 */
		++arg->wakes;
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}
	finish_wait(&data->futex_wait_queue, &wait);
	if (to)
		destroy_hrtimer_on_stack(&to->timer);
	return ret;
}
/**
 * Handles the details of copying from/to userspace to ensure that the copies
 * happen on all of the return paths of cond_wait.
 */
static int do_vsoc_cond_wait(struct file *filp,
			     struct vsoc_cond_wait __user *untrusted_in)
{
	struct vsoc_cond_wait arg;
	int rval = 0;

	if (copy_from_user(&arg, untrusted_in, sizeof(arg)))
		return -EFAULT;
	/* wakes is an out parameter. Initialize it to something sensible. */
	arg.wakes = 0;
	rval = handle_vsoc_cond_wait(filp, &arg);
	if (copy_to_user(untrusted_in, &arg, sizeof(arg)))
		return -EFAULT;
	return rval;
}
static int do_vsoc_cond_wake(struct file *filp, uint32_t offset)
{
	struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
	u32 region_number = iminor(file_inode(filp));
	struct vsoc_region_data *data = vsoc_dev.regions_data + region_number;

	/* Ensure that the offset is aligned */
	if (offset & (sizeof(uint32_t) - 1))
		return -EADDRNOTAVAIL;
	/* Ensure that the offset is within shared memory */
	if (((uint64_t)offset) + region_p->region_begin_offset +
	    sizeof(uint32_t) > region_p->region_end_offset)
		return -E2BIG;
	/*
	 * TODO(b/73664181): Use multiple futex wait queues.
	 * We need to wake every sleeper when the condition changes. Typically
	 * only a single thread will be waiting on the condition, but there
	 * are exceptions. The worst case is about 10 threads.
	 */
	wake_up_interruptible_all(&data->futex_wait_queue);
	return 0;
}
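
/*
 * Illustrative userspace sketch (not part of this driver) of how a guest
 * process might use the wait/wake ioctls above. The device path and open
 * flags are assumptions; the ioctl names and struct fields come from
 * uapi/vsoc_shm.h as used in this file:
 *
 *	int fd = open("/dev/<region name>", O_RDWR);	// node from device_create()
 *	struct vsoc_cond_wait w = {
 *		.offset = sync_word_offset,	// region-relative, 4-byte aligned
 *		.value = last_seen_value,
 *		.wait_type = VSOC_WAIT_IF_EQUAL,
 *	};
 *	ioctl(fd, VSOC_COND_WAIT, &w);	// sleeps while *sync_word == value
 *	...
 *	ioctl(fd, VSOC_COND_WAKE, sync_word_offset);	// wake all waiters
 */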
static long vsoc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int rv = 0;
	struct vsoc_device_region *region_p;
	u32 reg_num;
	struct vsoc_region_data *reg_data;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	region_p = vsoc_region_from_filep(filp);
	reg_num = iminor(file_inode(filp));
	reg_data = vsoc_dev.regions_data + reg_num;
	switch (cmd) {
	case VSOC_CREATE_FD_SCOPED_PERMISSION:
		{
			struct fd_scoped_permission_node *node = NULL;

			node = kzalloc(sizeof(*node), GFP_KERNEL);
			/* We can't allocate memory for the permission */
			if (!node)
				return -ENOMEM;
			INIT_LIST_HEAD(&node->list);
			rv = do_create_fd_scoped_permission
				(region_p, node,
				 (struct fd_scoped_permission_arg __user *)arg);
			if (!rv) {
				mutex_lock(&vsoc_dev.mtx);
				list_add(&node->list, &vsoc_dev.permissions);
				mutex_unlock(&vsoc_dev.mtx);
			} else {
				kfree(node);
				return rv;
			}
		}
		break;
	case VSOC_GET_FD_SCOPED_PERMISSION:
		{
			struct fd_scoped_permission_node *node =
			    ((struct vsoc_private_data *)filp->private_data)->
			    fd_scoped_permission_node;
			if (!node)
				return -ENOENT;
			if (copy_to_user
			    ((struct fd_scoped_permission __user *)arg,
			     &node->permission, sizeof(node->permission)))
				return -EFAULT;
		}
		break;
	case VSOC_MAYBE_SEND_INTERRUPT_TO_HOST:
		if (!atomic_xchg(reg_data->outgoing_signalled, 1)) {
			writel(reg_num, vsoc_dev.regs + DOORBELL);
			return 0;
		} else {
			return -EBUSY;
		}
	case VSOC_SEND_INTERRUPT_TO_HOST:
		writel(reg_num, vsoc_dev.regs + DOORBELL);
		return 0;
	case VSOC_WAIT_FOR_INCOMING_INTERRUPT:
		wait_event_interruptible
			(reg_data->interrupt_wait_queue,
			 (atomic_read(reg_data->incoming_signalled) != 0));
		break;
	case VSOC_DESCRIBE_REGION:
		return do_vsoc_describe_region
			(filp, (struct vsoc_device_region __user *)arg);
	case VSOC_SELF_INTERRUPT:
		atomic_set(reg_data->incoming_signalled, 1);
		wake_up_interruptible(&reg_data->interrupt_wait_queue);
		break;
	case VSOC_COND_WAIT:
		return do_vsoc_cond_wait(filp,
					 (struct vsoc_cond_wait __user *)arg);
	case VSOC_COND_WAKE:
		return do_vsoc_cond_wake(filp, arg);
	default:
		return -EINVAL;
	}
	return 0;
}
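
/*
 * read, write and lseek below all operate on the window returned by
 * vsoc_get_area(): the whole region when the fd has no fd-scoped permission
 * and the region is not managed by another, otherwise the permitted
 * sub-range (or nothing).
 */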
static ssize_t vsoc_read(struct file *filp, char __user *buffer, size_t len,
			 loff_t *poffset)
{
	__u32 area_off;
	const void *area_p;
	ssize_t area_len;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	area_len = vsoc_get_area(filp, &area_off);
	area_p = shm_off_to_virtual_addr(area_off);
	area_p += *poffset;
	area_len -= *poffset;
	if (area_len <= 0)
		return 0;
	if (area_len < len)
		len = area_len;
	if (copy_to_user(buffer, area_p, len))
		return -EFAULT;
	*poffset += len;
	return len;
}
static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin)
{
	ssize_t area_len = 0;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	area_len = vsoc_get_area(filp, NULL);
	switch (origin) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		if (offset > 0 && offset + filp->f_pos < 0)
			return -EOVERFLOW;
		offset += filp->f_pos;
		break;
	case SEEK_END:
		if (offset > 0 && offset + area_len < 0)
			return -EOVERFLOW;
		offset += area_len;
		break;
	case SEEK_DATA:
		if (offset >= area_len)
			return -EINVAL;
		if (offset < 0)
			offset = 0;
		break;
	case SEEK_HOLE:
		/* Next hole is always the end of the region, unless offset is
		 * beyond that.
		 */
		if (offset < area_len)
			offset = area_len;
		break;
	default:
		return -EINVAL;
	}

	if (offset < 0 || offset > area_len)
		return -EINVAL;
	filp->f_pos = offset;

	return offset;
}
static ssize_t vsoc_write(struct file *filp, const char __user *buffer,
			  size_t len, loff_t *poffset)
{
	__u32 area_off;
	void *area_p;
	ssize_t area_len;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	area_len = vsoc_get_area(filp, &area_off);
	area_p = shm_off_to_virtual_addr(area_off);
	area_p += *poffset;
	area_len -= *poffset;
	if (area_len <= 0)
		return 0;
	if (area_len < len)
		len = area_len;
	if (copy_from_user(area_p, buffer, len))
		return -EFAULT;
	*poffset += len;
	return len;
}
static irqreturn_t vsoc_interrupt(int irq, void *region_data_v)
{
	struct vsoc_region_data *region_data =
	    (struct vsoc_region_data *)region_data_v;
	int reg_num = region_data - vsoc_dev.regions_data;

	if (unlikely(!region_data))
		return IRQ_NONE;

	if (unlikely(reg_num < 0 ||
		     reg_num >= vsoc_dev.layout->region_count)) {
		dev_err(&vsoc_dev.dev->dev,
			"invalid irq @%p reg_num=0x%04x\n",
			region_data, reg_num);
		return IRQ_NONE;
	}
	if (unlikely(vsoc_dev.regions_data + reg_num != region_data)) {
		dev_err(&vsoc_dev.dev->dev,
			"irq not aligned @%p reg_num=0x%04x\n",
			region_data, reg_num);
		return IRQ_NONE;
	}
	wake_up_interruptible(&region_data->interrupt_wait_queue);
	return IRQ_HANDLED;
}
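
/*
 * Probe order matters because vsoc_remove_device() doubles as the error
 * path: enable the PCI device, map BAR 0 (registers) and BAR 2 (shared
 * memory), validate the shared memory layout, allocate the chrdev range and
 * class, enable one MSI-X vector per region, sanity check every region
 * descriptor, then wire up wait queues, interrupts and device nodes per
 * region.
 */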
static int vsoc_probe_device(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	int result;
	int i;
	resource_size_t reg_size;
	dev_t devt;

	vsoc_dev.dev = pdev;
	result = pci_enable_device(pdev);
	if (result) {
		dev_err(&pdev->dev,
			"pci_enable_device failed %s: error %d\n",
			pci_name(pdev), result);
		return result;
	}
	vsoc_dev.enabled_device = true;
	result = pci_request_regions(pdev, "vsoc");
	if (result < 0) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		vsoc_remove_device(pdev);
		return -EBUSY;
	}
	vsoc_dev.requested_regions = true;
	/* Set up the control registers in BAR 0 */
	reg_size = pci_resource_len(pdev, REGISTER_BAR);
	if (reg_size > MAX_REGISTER_BAR_LEN)
		vsoc_dev.regs =
		    pci_iomap(pdev, REGISTER_BAR, MAX_REGISTER_BAR_LEN);
	else
		vsoc_dev.regs = pci_iomap(pdev, REGISTER_BAR, reg_size);

	if (!vsoc_dev.regs) {
		dev_err(&pdev->dev,
			"cannot map registers of size %zu\n",
			(size_t)reg_size);
		vsoc_remove_device(pdev);
		return -EBUSY;
	}

	/* Map the shared memory in BAR 2 */
	vsoc_dev.shm_phys_start = pci_resource_start(pdev, SHARED_MEMORY_BAR);
	vsoc_dev.shm_size = pci_resource_len(pdev, SHARED_MEMORY_BAR);

	dev_info(&pdev->dev, "shared memory @ DMA %pa size=0x%zx\n",
		 &vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
	vsoc_dev.kernel_mapped_shm = pci_iomap_wc(pdev, SHARED_MEMORY_BAR, 0);
	if (!vsoc_dev.kernel_mapped_shm) {
		dev_err(&vsoc_dev.dev->dev, "cannot iomap region\n");
		vsoc_remove_device(pdev);
		return -EBUSY;
	}

	vsoc_dev.layout = (struct vsoc_shm_layout_descriptor __force *)
				vsoc_dev.kernel_mapped_shm;
	dev_info(&pdev->dev, "major_version: %d\n",
		 vsoc_dev.layout->major_version);
	dev_info(&pdev->dev, "minor_version: %d\n",
		 vsoc_dev.layout->minor_version);
	dev_info(&pdev->dev, "size: 0x%x\n", vsoc_dev.layout->size);
	dev_info(&pdev->dev, "regions: %d\n", vsoc_dev.layout->region_count);
	if (vsoc_dev.layout->major_version !=
	    CURRENT_VSOC_LAYOUT_MAJOR_VERSION) {
		dev_err(&vsoc_dev.dev->dev,
			"driver supports only major_version %d\n",
			CURRENT_VSOC_LAYOUT_MAJOR_VERSION);
		vsoc_remove_device(pdev);
		return -EBUSY;
	}
	result = alloc_chrdev_region(&devt, 0, vsoc_dev.layout->region_count,
				     VSOC_DEV_NAME);
	if (result) {
		dev_err(&vsoc_dev.dev->dev, "alloc_chrdev_region failed\n");
		vsoc_remove_device(pdev);
		return -EBUSY;
	}
	vsoc_dev.major = MAJOR(devt);
	cdev_init(&vsoc_dev.cdev, &vsoc_ops);
	vsoc_dev.cdev.owner = THIS_MODULE;
	result = cdev_add(&vsoc_dev.cdev, devt, vsoc_dev.layout->region_count);
	if (result) {
		dev_err(&vsoc_dev.dev->dev, "cdev_add error\n");
		vsoc_remove_device(pdev);
		return -EBUSY;
	}
	vsoc_dev.cdev_added = true;
	vsoc_dev.class = class_create(THIS_MODULE, VSOC_DEV_NAME);
	if (IS_ERR(vsoc_dev.class)) {
		dev_err(&vsoc_dev.dev->dev, "class_create failed\n");
		vsoc_remove_device(pdev);
		return PTR_ERR(vsoc_dev.class);
	}
	vsoc_dev.class_added = true;
	vsoc_dev.regions = (struct vsoc_device_region __force *)
		((void *)vsoc_dev.layout +
		 vsoc_dev.layout->vsoc_region_desc_offset);
	vsoc_dev.msix_entries =
		kcalloc(vsoc_dev.layout->region_count,
			sizeof(vsoc_dev.msix_entries[0]), GFP_KERNEL);
	if (!vsoc_dev.msix_entries) {
		dev_err(&vsoc_dev.dev->dev,
			"unable to allocate msix_entries\n");
		vsoc_remove_device(pdev);
		return -ENOSPC;
	}
	vsoc_dev.regions_data =
		kcalloc(vsoc_dev.layout->region_count,
			sizeof(vsoc_dev.regions_data[0]), GFP_KERNEL);
	if (!vsoc_dev.regions_data) {
		dev_err(&vsoc_dev.dev->dev,
			"unable to allocate regions' data\n");
		vsoc_remove_device(pdev);
		return -ENOSPC;
	}
	for (i = 0; i < vsoc_dev.layout->region_count; ++i)
		vsoc_dev.msix_entries[i].entry = i;

	result = pci_enable_msix_exact(vsoc_dev.dev, vsoc_dev.msix_entries,
				       vsoc_dev.layout->region_count);
	if (result) {
		dev_info(&pdev->dev, "pci_enable_msix failed: %d\n", result);
		vsoc_remove_device(pdev);
		return -ENOSPC;
	}
	/* Check that all regions are well formed */
	for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
		const struct vsoc_device_region *region = vsoc_dev.regions + i;

		if (!PAGE_ALIGNED(region->region_begin_offset) ||
		    !PAGE_ALIGNED(region->region_end_offset)) {
			dev_err(&vsoc_dev.dev->dev,
				"region %d not aligned (%x:%x)", i,
				region->region_begin_offset,
				region->region_end_offset);
			vsoc_remove_device(pdev);
			return -EFAULT;
		}
		if (region->region_begin_offset >= region->region_end_offset ||
		    region->region_end_offset > vsoc_dev.shm_size) {
			dev_err(&vsoc_dev.dev->dev,
				"region %d offsets are wrong: %x %x %zx",
				i, region->region_begin_offset,
				region->region_end_offset, vsoc_dev.shm_size);
			vsoc_remove_device(pdev);
			return -EFAULT;
		}
		if (region->managed_by >= vsoc_dev.layout->region_count) {
			dev_err(&vsoc_dev.dev->dev,
				"region %d has invalid owner: %u",
				i, region->managed_by);
			vsoc_remove_device(pdev);
			return -EFAULT;
		}
	}
	vsoc_dev.msix_enabled = true;
	for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
		const struct vsoc_device_region *region = vsoc_dev.regions + i;
		size_t name_sz = sizeof(vsoc_dev.regions_data[i].name) - 1;
		const struct vsoc_signal_table_layout *h_to_g_signal_table =
			&region->host_to_guest_signal_table;
		const struct vsoc_signal_table_layout *g_to_h_signal_table =
			&region->guest_to_host_signal_table;

		vsoc_dev.regions_data[i].name[name_sz] = '\0';
		memcpy(vsoc_dev.regions_data[i].name, region->device_name,
		       name_sz);
		dev_info(&pdev->dev, "region %d name=%s\n",
			 i, vsoc_dev.regions_data[i].name);
		init_waitqueue_head
			(&vsoc_dev.regions_data[i].interrupt_wait_queue);
		init_waitqueue_head(&vsoc_dev.regions_data[i].futex_wait_queue);
		vsoc_dev.regions_data[i].incoming_signalled =
			shm_off_to_virtual_addr(region->region_begin_offset) +
			h_to_g_signal_table->interrupt_signalled_offset;
		vsoc_dev.regions_data[i].outgoing_signalled =
			shm_off_to_virtual_addr(region->region_begin_offset) +
			g_to_h_signal_table->interrupt_signalled_offset;
		result = request_irq(vsoc_dev.msix_entries[i].vector,
				     vsoc_interrupt, 0,
				     vsoc_dev.regions_data[i].name,
				     vsoc_dev.regions_data + i);
		if (result) {
			dev_info(&pdev->dev,
				 "request_irq failed irq=%d vector=%d\n",
				 i, vsoc_dev.msix_entries[i].vector);
			vsoc_remove_device(pdev);
			return -ENOSPC;
		}
		vsoc_dev.regions_data[i].irq_requested = true;
		/* device_create() returns an ERR_PTR on failure, not NULL. */
		if (IS_ERR(device_create(vsoc_dev.class, NULL,
					 MKDEV(vsoc_dev.major, i),
					 NULL,
					 vsoc_dev.regions_data[i].name))) {
			dev_err(&vsoc_dev.dev->dev, "device_create failed\n");
			vsoc_remove_device(pdev);
			return -EBUSY;
		}
		vsoc_dev.regions_data[i].device_created = true;
	}
	return 0;
}
/*
 * This should undo all of the allocations in the probe function in reverse
 * order.
 *
 * Notes:
 *
 *   The device may have been partially initialized, so double check
 *   that the allocations happened.
 *
 *   This function may be called multiple times, so mark resources as freed
 *   as they are deallocated.
 */
static void vsoc_remove_device(struct pci_dev *pdev)
{
	int i;
	/*
	 * pdev is the first thing to be set on probe and the last thing
	 * to be cleared here. If it's NULL then there is no cleanup.
	 */
	if (!pdev || !vsoc_dev.dev)
		return;
	dev_info(&pdev->dev, "remove_device\n");
	if (vsoc_dev.regions_data) {
		for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
			if (vsoc_dev.regions_data[i].device_created) {
				device_destroy(vsoc_dev.class,
					       MKDEV(vsoc_dev.major, i));
				vsoc_dev.regions_data[i].device_created = false;
			}
			/* dev_id must match what was passed to request_irq */
			if (vsoc_dev.regions_data[i].irq_requested)
				free_irq(vsoc_dev.msix_entries[i].vector,
					 vsoc_dev.regions_data + i);
			vsoc_dev.regions_data[i].irq_requested = false;
		}
		kfree(vsoc_dev.regions_data);
		vsoc_dev.regions_data = NULL;
	}
	if (vsoc_dev.msix_enabled) {
		pci_disable_msix(pdev);
		vsoc_dev.msix_enabled = false;
	}
	kfree(vsoc_dev.msix_entries);
	vsoc_dev.msix_entries = NULL;
	vsoc_dev.regions = NULL;
	if (vsoc_dev.class_added) {
		class_destroy(vsoc_dev.class);
		vsoc_dev.class_added = false;
	}
	if (vsoc_dev.cdev_added) {
		cdev_del(&vsoc_dev.cdev);
		vsoc_dev.cdev_added = false;
	}
	if (vsoc_dev.major && vsoc_dev.layout) {
		unregister_chrdev_region(MKDEV(vsoc_dev.major, 0),
					 vsoc_dev.layout->region_count);
		vsoc_dev.major = 0;
	}
	vsoc_dev.layout = NULL;
	if (vsoc_dev.kernel_mapped_shm) {
		pci_iounmap(pdev, vsoc_dev.kernel_mapped_shm);
		vsoc_dev.kernel_mapped_shm = NULL;
	}
	if (vsoc_dev.regs) {
		pci_iounmap(pdev, vsoc_dev.regs);
		vsoc_dev.regs = NULL;
	}
	if (vsoc_dev.requested_regions) {
		pci_release_regions(pdev);
		vsoc_dev.requested_regions = false;
	}
	if (vsoc_dev.enabled_device) {
		pci_disable_device(pdev);
		vsoc_dev.enabled_device = false;
	}
	/* Do this last: it indicates that the device is not initialized. */
	vsoc_dev.dev = NULL;
}
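
/*
 * Module init only registers the PCI driver (and seeds the permission list
 * and its mutex); all real setup happens in vsoc_probe_device() when the
 * ivshmem device is found. Module exit tears the device down before
 * unregistering so the cleanup path is shared with probe.
 */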
static void __exit vsoc_cleanup_module(void)
{
	vsoc_remove_device(vsoc_dev.dev);
	pci_unregister_driver(&vsoc_pci_driver);
}

static int __init vsoc_init_module(void)
{
	int err = -ENOMEM;

	INIT_LIST_HEAD(&vsoc_dev.permissions);
	mutex_init(&vsoc_dev.mtx);

	err = pci_register_driver(&vsoc_pci_driver);
	if (err < 0)
		return err;
	return 0;
}
static int vsoc_open(struct inode *inode, struct file *filp)
{
	/* Can't use vsoc_validate_filep because filp is still incomplete */
	int ret = vsoc_validate_inode(inode);

	if (ret)
		return ret;
	filp->private_data =
		kzalloc(sizeof(struct vsoc_private_data), GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;
	return 0;
}
static int vsoc_release(struct inode *inode, struct file *filp)
{
	struct vsoc_private_data *private_data = NULL;
	struct fd_scoped_permission_node *node = NULL;
	struct vsoc_device_region *owner_region_p = NULL;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	private_data = (struct vsoc_private_data *)filp->private_data;
	if (!private_data)
		return 0;

	node = private_data->fd_scoped_permission_node;
	if (node) {
		owner_region_p = vsoc_region_from_inode(inode);
		if (owner_region_p->managed_by != VSOC_REGION_WHOLE) {
			owner_region_p =
			    &vsoc_dev.regions[owner_region_p->managed_by];
		}
		do_destroy_fd_scoped_permission_node(owner_region_p, node);
		private_data->fd_scoped_permission_node = NULL;
	}
	kfree(private_data);
	filp->private_data = NULL;

	return 0;
}
/*
 * Returns the device relative offset and length of the area specified by the
 * fd scoped permission. If there is no fd scoped permission set, a default
 * permission covering the entire region is assumed, unless the region is
 * owned by another one, in which case the default is a permission with zero
 * size.
 */
static ssize_t vsoc_get_area(struct file *filp, __u32 *area_offset)
{
	__u32 off = 0;
	ssize_t length = 0;
	struct vsoc_device_region *region_p;
	struct fd_scoped_permission_node *node;

	region_p = vsoc_region_from_filep(filp);
	off = region_p->region_begin_offset;
	node = ((struct vsoc_private_data *)filp->private_data)->
		fd_scoped_permission_node;
	if (node) {
		struct fd_scoped_permission *perm = &node->permission;

		off += perm->begin_offset;
		length = perm->end_offset - perm->begin_offset;
	} else if (region_p->managed_by == VSOC_REGION_WHOLE) {
		/* No permission set and the region is not owned by another,
		 * default to full region access.
		 */
		length = vsoc_device_region_size(region_p);
	} else {
		/* Return zero length, access is denied. */
		length = 0;
	}
	if (area_offset)
		*area_offset = off;
	return length;
}
static int vsoc_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;
	__u32 area_off;
	phys_addr_t mem_off;
	ssize_t area_len;
	int retval = vsoc_validate_filep(filp);

	if (retval)
		return retval;
	area_len = vsoc_get_area(filp, &area_off);
	/* Add the requested offset */
	area_off += (vma->vm_pgoff << PAGE_SHIFT);
	area_len -= (vma->vm_pgoff << PAGE_SHIFT);
	if (area_len < len)
		return -EINVAL;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	mem_off = shm_off_to_phys_addr(area_off);
	if (io_remap_pfn_range(vma, vma->vm_start, mem_off >> PAGE_SHIFT,
			       len, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
module_init(vsoc_init_module);
module_exit(vsoc_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VSoC interpretation of QEmu's ivshmem device");
MODULE_VERSION("1.0");