// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
 */

#include <linux/vfio.h>
#include <linux/cdx/cdx_bus.h>

#include "private.h"	/* struct vfio_cdx_device/region, VFIO_CDX_OFFSET_SHIFT */

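/*
 * vfio-cdx is a VFIO "meta-driver" for devices on the CDX bus: each MMIO
 * resource of the CDX device is exported to userspace as a VFIO region,
 * and page-aligned regions may additionally be mmap()ed directly.
 *
 * Illustrative userspace sketch (not part of this driver; obtaining
 * device_fd through the VFIO group or iommufd cdev flow is assumed):
 *
 *	struct vfio_region_info info = { .argsz = sizeof(info), .index = 0 };
 *
 *	if (!ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info) &&
 *	    (info.flags & VFIO_REGION_INFO_FLAG_MMAP))
 *		regs = mmap(NULL, info.size, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, device_fd, info.offset);
 */
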
static int vfio_cdx_open_device(struct vfio_device *core_vdev)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	int count = cdx_dev->res_count;
	int i;

	vdev->regions = kcalloc(count, sizeof(struct vfio_cdx_region),
				GFP_KERNEL_ACCOUNT);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct resource *res = &cdx_dev->res[i];

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].type = res->flags;
		/*
		 * Only regions addressed with PAGE granularity may be
		 * MMAP'ed securely.
		 */
		if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
		    !(vdev->regions[i].size & ~PAGE_MASK))
			vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
		vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
		if (!(cdx_dev->res[i].flags & IORESOURCE_READONLY))
			vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_WRITE;
	}

	return 0;
}

static void vfio_cdx_close_device(struct vfio_device *core_vdev)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);

	kfree(vdev->regions);
	cdx_dev_reset(core_vdev->dev);
}

static int vfio_cdx_ioctl_get_info(struct vfio_cdx_device *vdev,
				   struct vfio_device_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_device_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	info.flags = VFIO_DEVICE_FLAGS_CDX;
	info.flags |= VFIO_DEVICE_FLAGS_RESET;

	info.num_regions = cdx_dev->res_count;
	info.num_irqs = 0;

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}

static int vfio_cdx_ioctl_get_region_info(struct vfio_cdx_device *vdev,
					   struct vfio_region_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_region_info, offset);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_region_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	if (info.index >= cdx_dev->res_count)
		return -EINVAL;

	/* map offset to the physical address */
	info.offset = vfio_cdx_index_to_offset(info.index);
	info.size = vdev->regions[info.index].size;
	info.flags = vdev->regions[info.index].flags;

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}

static long vfio_cdx_ioctl(struct vfio_device *core_vdev,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	void __user *uarg = (void __user *)arg;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
		return vfio_cdx_ioctl_get_info(vdev, uarg);
	case VFIO_DEVICE_GET_REGION_INFO:
		return vfio_cdx_ioctl_get_region_info(vdev, uarg);
	case VFIO_DEVICE_RESET:
		return cdx_dev_reset(core_vdev->dev);
	default:
		return -ENOTTY;
	}
}

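/*
 * The mmap offset returned for each region by VFIO_DEVICE_GET_REGION_INFO
 * encodes the region index in the bits above VFIO_CDX_OFFSET_SHIFT, with
 * the low bits giving the page offset inside that region.
 * vfio_cdx_mmap() below recovers the index from vma->vm_pgoff and
 * vfio_cdx_mmap_mmio() translates the remainder to the region's physical
 * address.
 */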
static int vfio_cdx_mmap_mmio(struct vfio_cdx_region region,
			      struct vm_area_struct *vma)
{
	u64 size = vma->vm_end - vma->vm_start;
	u64 pgoff, base;

	pgoff = vma->vm_pgoff &
		((1U << (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	base = pgoff << PAGE_SHIFT;

	if (base + size > region.size)
		return -EINVAL;

	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
	vma->vm_page_prot = pgprot_device(vma->vm_page_prot);

	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  size, vma->vm_page_prot);
}

static int vfio_cdx_mmap(struct vfio_device *core_vdev,
			 struct vm_area_struct *vma)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT);

	if (index >= cdx_dev->res_count)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ) &&
	    (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE) &&
	    (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	return vfio_cdx_mmap_mmio(vdev->regions[index], vma);
}

static const struct vfio_device_ops vfio_cdx_ops = {
	.name		= "vfio-cdx",
	.open_device	= vfio_cdx_open_device,
	.close_device	= vfio_cdx_close_device,
	.ioctl		= vfio_cdx_ioctl,
	.mmap		= vfio_cdx_mmap,
	.bind_iommufd	= vfio_iommufd_physical_bind,
	.unbind_iommufd	= vfio_iommufd_physical_unbind,
	.attach_ioas	= vfio_iommufd_physical_attach_ioas,
};

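/*
 * DMA/IOMMU handling is left to the VFIO core: the generic
 * vfio_iommufd_physical_*() helpers above manage iommufd bind/attach for
 * this physical device, so the driver itself only describes regions and
 * forwards resets.
 */
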
static int vfio_cdx_probe(struct cdx_device *cdx_dev)
{
	struct vfio_cdx_device *vdev;
	struct device *dev = &cdx_dev->dev;
	int ret;

	vdev = vfio_alloc_device(vfio_cdx_device, vdev, dev,
				 &vfio_cdx_ops);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret)
		goto out_uninit;

	dev_set_drvdata(dev, vdev);
	return 0;

out_uninit:
	vfio_put_device(&vdev->vdev);
	return ret;
}

static int vfio_cdx_remove(struct cdx_device *cdx_dev)
{
	struct device *dev = &cdx_dev->dev;
	struct vfio_cdx_device *vdev = dev_get_drvdata(dev);

	vfio_unregister_group_dev(&vdev->vdev);
	vfio_put_device(&vdev->vdev);

	return 0;
}

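/*
 * Wildcard match entry: as with other VFIO bus drivers, the driver-override
 * flag means devices are expected to be bound explicitly (e.g. via the
 * driver_override sysfs attribute) rather than auto-probed.
 */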
static const struct cdx_device_id vfio_cdx_table[] = {
	{ CDX_DEVICE_DRIVER_OVERRIDE(CDX_ANY_ID, CDX_ANY_ID,
				     CDX_ID_F_VFIO_DRIVER_OVERRIDE) }, /* match all by default */
	{}
};

MODULE_DEVICE_TABLE(cdx, vfio_cdx_table);

static struct cdx_driver vfio_cdx_driver = {
	.probe		= vfio_cdx_probe,
	.remove		= vfio_cdx_remove,
	.match_id_table	= vfio_cdx_table,
	.driver	= {
		.name	= "vfio-cdx",
		.owner	= THIS_MODULE,
	},
	.driver_managed_dma	= true,
};

module_driver(vfio_cdx_driver, cdx_driver_register, cdx_driver_unregister);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VFIO for CDX devices - User Level meta-driver");