/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include "vfio_platform_private.h"
#define DRIVER_VERSION  "0.10"
#define DRIVER_AUTHOR   "Antonios Motakis <a.motakis@virtualopensystems.com>"
#define DRIVER_DESC     "VFIO platform base module"
static LIST_HEAD(reset_list);
static DEFINE_MUTEX(driver_lock);
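/*
 * Search the registered reset handlers for one matching the device's
 * "compatible" string; on a hit, take a reference on the owning module
 * so it cannot be unloaded while the handler is in use.
 */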
static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat,
					struct module **module)
{
	struct vfio_platform_reset_node *iter;
	vfio_platform_reset_fn_t reset_fn = NULL;

	mutex_lock(&driver_lock);
	list_for_each_entry(iter, &reset_list, link) {
		if (!strcmp(iter->compat, compat) &&
			try_module_get(iter->owner)) {
			*module = iter->owner;
			reset_fn = iter->reset;
			break;
		}
	}
	mutex_unlock(&driver_lock);
	return reset_fn;
}
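/*
 * Bind a reset handler to the device. If none is registered yet, ask
 * the module loader for a "vfio-reset:<compat>" provider and retry.
 */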
static void vfio_platform_get_reset(struct vfio_platform_device *vdev)
{
	vdev->reset = vfio_platform_lookup_reset(vdev->compat,
						&vdev->reset_module);
	if (!vdev->reset) {
		request_module("vfio-reset:%s", vdev->compat);
		vdev->reset = vfio_platform_lookup_reset(vdev->compat,
							&vdev->reset_module);
	}
}
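/* Drop the module reference taken by vfio_platform_lookup_reset(). */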
static void vfio_platform_put_reset(struct vfio_platform_device *vdev)
{
	if (vdev->reset)
		module_put(vdev->reset_module);
}
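/*
 * Enumerate the device's resources and build the region table: count
 * them first, then record address, size and access flags for each.
 */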
static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
{
	int cnt = 0, i;

	while (vdev->get_resource(vdev, cnt))
		cnt++;

	vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),
				GFP_KERNEL);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct resource *res =
			vdev->get_resource(vdev, i);

		if (!res)
			goto err;

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].flags = 0;

		switch (resource_type(res)) {
		case IORESOURCE_MEM:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
			if (!(res->flags & IORESOURCE_READONLY))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_WRITE;

			/*
			 * Only regions addressed with PAGE granularity may be
			 * MMAPed securely.
			 */
			if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
			    !(vdev->regions[i].size & ~PAGE_MASK))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
			break;
		case IORESOURCE_IO:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
			break;
		default:
			goto err;
		}
	}

	vdev->num_regions = cnt;

	return 0;
err:
	kfree(vdev->regions);
	return -EINVAL;
}
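/* Unmap any regions that were ioremapped on demand and free the table. */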
static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
{
	int i;

	for (i = 0; i < vdev->num_regions; i++)
		iounmap(vdev->regions[i].ioaddr);

	vdev->num_regions = 0;
	kfree(vdev->regions);
}
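/*
 * Called when the last file reference goes away: reset the device if a
 * reset handler is available, then tear down regions and interrupts.
 */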
static void vfio_platform_release(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		if (vdev->reset) {
			dev_info(vdev->device, "reset\n");
			vdev->reset(vdev);
		} else {
			dev_warn(vdev->device, "no reset function found!\n");
		}
		vfio_platform_regions_cleanup(vdev);
		vfio_platform_irq_cleanup(vdev);
	}

	mutex_unlock(&driver_lock);

	module_put(vdev->parent_module);
}
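/*
 * First open initializes regions and interrupts and resets the device;
 * subsequent opens only bump the reference count.
 */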
static int vfio_platform_open(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;
	int ret;

	if (!try_module_get(vdev->parent_module))
		return -ENODEV;

	mutex_lock(&driver_lock);

	if (!vdev->refcnt) {
		ret = vfio_platform_regions_init(vdev);
		if (ret)
			goto err_reg;

		ret = vfio_platform_irq_init(vdev);
		if (ret)
			goto err_irq;

		if (vdev->reset) {
			dev_info(vdev->device, "reset\n");
			vdev->reset(vdev);
		} else {
			dev_warn(vdev->device, "no reset function found!\n");
		}
	}

	vdev->refcnt++;

	mutex_unlock(&driver_lock);
	return 0;

err_irq:
	vfio_platform_regions_cleanup(vdev);
err_reg:
	mutex_unlock(&driver_lock);
	module_put(THIS_MODULE);
	return ret;
}
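/*
 * Dispatch the VFIO device ioctls: device/region/IRQ info queries,
 * interrupt configuration and device reset.
 */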
static long vfio_platform_ioctl(void *device_data,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (vdev->reset)
			vdev->flags |= VFIO_DEVICE_FLAGS_RESET;
		info.flags = vdev->flags;
		info.num_regions = vdev->num_regions;
		info.num_irqs = vdev->num_irqs;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_regions)
			return -EINVAL;

		/* map offset to the physical address */
		info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_irqs)
			return -EINVAL;

		info.flags = vdev->irqs[info.index].flags;
		info.count = vdev->irqs[info.index].count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		if (hdr.index >= vdev->num_irqs)
			return -EINVAL;

		if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
			return -EINVAL;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			size_t size;

			if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
				size = sizeof(uint8_t);
			else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
				size = sizeof(int32_t);
			else
				return -EINVAL;

			if (hdr.argsz - minsz < size)
				return -EINVAL;

			data = memdup_user((void __user *)(arg + minsz), size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
						   hdr.start, hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;
	} else if (cmd == VFIO_DEVICE_RESET) {
		if (vdev->reset)
			return vdev->reset(vdev);
		else
			return -EINVAL;
	}

	return -ENOTTY;
}
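/*
 * Read from an MMIO region on behalf of userspace, mapping it lazily on
 * first access and splitting the transfer into naturally aligned 4-, 2-
 * and 1-byte accesses.
 */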
static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
				       char __user *buf, size_t count,
				       loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		reg->ioaddr =
			ioremap_nocache(reg->addr, reg->size);

		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			val = ioread32(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 4))
				goto err;

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			val = ioread16(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 2))
				goto err;

			filled = 2;
		} else {
			u8 val;

			val = ioread8(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 1))
				goto err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}
static ssize_t vfio_platform_read(void *device_data, char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_read_mmio(&vdev->regions[index],
							buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}
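/*
 * Write to an MMIO region on behalf of userspace; the mirror image of
 * vfio_platform_read_mmio(), with the same lazy mapping and alignment
 * splitting.
 */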
static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
					const char __user *buf, size_t count,
					loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		reg->ioaddr =
			ioremap_nocache(reg->addr, reg->size);

		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, 4))
				goto err;
			iowrite32(val, reg->ioaddr + off);

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, 2))
				goto err;
			iowrite16(val, reg->ioaddr + off);

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, 1))
				goto err;
			iowrite8(val, reg->ioaddr + off);

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}
static ssize_t vfio_platform_write(void *device_data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_write_mmio(&vdev->regions[index],
							buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}
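/*
 * Validate the requested window against the region and install a
 * non-cached mapping of the physical MMIO range into the vma.
 */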
static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
				   struct vm_area_struct *vma)
{
	u64 req_len, pgoff, req_start;

	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || req_start + req_len > region.size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}
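/*
 * Sanity-check the vma (shared, page aligned, within bounds) and the
 * region's MMAP/READ/WRITE flags before handing off to the MMIO mapper.
 */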
static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= vdev->num_regions)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = vdev;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_mmap_mmio(vdev->regions[index], vma);

	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}
static const struct vfio_device_ops vfio_platform_ops = {
	.name		= "vfio-platform",
	.open		= vfio_platform_open,
	.release	= vfio_platform_release,
	.ioctl		= vfio_platform_ioctl,
	.read		= vfio_platform_read,
	.write		= vfio_platform_write,
	.mmap		= vfio_platform_mmap,
};
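/*
 * Common probe path shared by the bus-specific vfio-platform drivers:
 * read the "compatible" property, require an IOMMU group, register with
 * the VFIO core and bind an optional reset handler.
 */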
int vfio_platform_probe_common(struct vfio_platform_device *vdev,
			       struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (!vdev)
		return -EINVAL;

	ret = device_property_read_string(dev, "compatible", &vdev->compat);
	if (ret) {
		pr_err("VFIO: cannot retrieve compat for %s\n", vdev->name);
		return ret;
	}

	vdev->device = dev;

	group = iommu_group_get(dev);
	if (!group) {
		pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
		return -EINVAL;
	}

	ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev);
	if (ret) {
		iommu_group_put(group);
		return ret;
	}

	vfio_platform_get_reset(vdev);

	mutex_init(&vdev->igate);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_platform_probe_common);
struct vfio_platform_device *vfio_platform_remove_common(struct device *dev)
{
	struct vfio_platform_device *vdev;

	vdev = vfio_del_group_dev(dev);

	if (vdev) {
		vfio_platform_put_reset(vdev);
		iommu_group_put(dev->iommu_group);
	}

	return vdev;
}
EXPORT_SYMBOL_GPL(vfio_platform_remove_common);
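/* Add a reset-handler node to the global list of reset providers. */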
void __vfio_platform_register_reset(struct vfio_platform_reset_node *node)
{
	mutex_lock(&driver_lock);
	list_add(&node->link, &reset_list);
	mutex_unlock(&driver_lock);
}
EXPORT_SYMBOL_GPL(__vfio_platform_register_reset);
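/*
 * Remove a previously registered reset handler; both the compatible
 * string and the function pointer must match.
 */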
void vfio_platform_unregister_reset(const char *compat,
				    vfio_platform_reset_fn_t fn)
{
	struct vfio_platform_reset_node *iter, *temp;

	mutex_lock(&driver_lock);
	list_for_each_entry_safe(iter, temp, &reset_list, link) {
		if (!strcmp(iter->compat, compat) && (iter->reset == fn)) {
			list_del(&iter->link);
			break;
		}
	}

	mutex_unlock(&driver_lock);
}
EXPORT_SYMBOL_GPL(vfio_platform_unregister_reset);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);