/*
 * VFIO: IOMMU DMA mapping support for Type1 IOMMU
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 *
 * We arbitrarily define a Type1 IOMMU as one matching the below code.
 * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
 * VT-d, but that makes it harder to re-use as theoretically anyone
 * implementing a similar IOMMU could make use of this.  We expect the
 * IOMMU to support the IOMMU API and have few to no restrictions around
 * the IOVA range that can be mapped.  The Type1 IOMMU is currently
 * optimized for relatively static mappings of a userspace process with
 * userspace pages pinned into memory.  We also assume devices and IOMMU
 * domains are PCI based as the IOMMU API is still centered around a
 * device/bus interface rather than a group interface.
 */
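
/*
 * For orientation, a rough userspace sketch of how this backend gets
 * exercised (not part of this driver; error handling, the group ioctls and
 * device open are omitted, and "buf"/"size" are placeholder names): once a
 * group has been added to a container, the process selects the Type1 model
 * and maps a page-aligned buffer for device DMA.
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)buf,
 *		.iova  = 0,
 *		.size  = size,
 *	};
 *
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 *
 * VFIO_SET_IOMMU lands in vfio_iommu_type1_open() below, and the MAP_DMA
 * ioctl is handled by vfio_iommu_type1_ioctl() -> vfio_dma_do_map().
 */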

#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>		/* pci_bus_type */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>

#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "Type1 IOMMU driver for VFIO"

static bool allow_unsafe_interrupts;
module_param_named(allow_unsafe_interrupts,
		   allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_interrupts,
		 "Enable VFIO IOMMU support on platforms without interrupt remapping support.");

struct vfio_iommu {
	struct iommu_domain	*domain;
	struct mutex		lock;
	struct list_head	dma_list;
	struct list_head	group_list;
	bool			cache;
};

struct vfio_dma {
	struct list_head	next;
	dma_addr_t		iova;		/* Device address */
	unsigned long		vaddr;		/* Process virtual addr */
	long			npage;		/* Number of pages */
	int			prot;		/* IOMMU_READ/WRITE */
};

struct vfio_group {
	struct iommu_group	*iommu_group;
	struct list_head	next;
};

/*
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

#define NPAGE_TO_SIZE(npage)	((size_t)(npage) << PAGE_SHIFT)
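
/*
 * mm->locked_vm is normally updated under mmap_sem.  When vfio_lock_acct()
 * below cannot take the semaphore, it defers the adjustment to a workqueue
 * using this structure.
 */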
struct vwork {
	struct mm_struct	*mm;
	long			npage;
	struct work_struct	work;
};

/* delayed decrement/increment for locked_vm */
static void vfio_lock_acct_bg(struct work_struct *work)
{
	struct vwork *vwork = container_of(work, struct vwork, work);
	struct mm_struct *mm = vwork->mm;

	down_write(&mm->mmap_sem);
	mm->locked_vm += vwork->npage;
	up_write(&mm->mmap_sem);
	mmput(mm);
	kfree(vwork);
}
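
/*
 * Adjust current->mm->locked_vm by npage (negative when unpinning).  Takes
 * mmap_sem directly when uncontended, otherwise defers the update to
 * vfio_lock_acct_bg() via a workqueue item.
 */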
static void vfio_lock_acct(long npage)
{
	struct vwork *vwork;
	struct mm_struct *mm;

	if (!current->mm)
		return; /* process exited */

	if (down_write_trylock(&current->mm->mmap_sem)) {
		current->mm->locked_vm += npage;
		up_write(&current->mm->mmap_sem);
		return;
	}

	/*
	 * Couldn't get mmap_sem lock, so must setup to update
	 * mm->locked_vm later.  If locked_vm were atomic, we
	 * wouldn't need this silliness.
	 */
	vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
	if (!vwork)
		return;
	mm = get_task_mm(current);
	if (!mm) {
		kfree(vwork);
		return;
	}
	INIT_WORK(&vwork->work, vfio_lock_acct_bg);
	vwork->mm = mm;
	vwork->npage = npage;
	schedule_work(&vwork->work);
}

/*
 * Some mappings aren't backed by a struct page, for example an mmap'd
 * MMIO range for our own or another device.  These use a different
 * pfn conversion and shouldn't be tracked as locked pages.
 */
static bool is_invalid_reserved_pfn(unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		bool reserved;
		struct page *tail = pfn_to_page(pfn);
		struct page *head = compound_trans_head(tail);
		reserved = !!(PageReserved(head));
		if (head != tail) {
			/*
			 * "head" is not a dangling pointer
			 * (compound_trans_head takes care of that)
			 * but the hugepage may have been split
			 * from under us (and we may not hold a
			 * reference count on the head page so it can
			 * be reused before we run PageReferenced), so
			 * we've to check PageTail before returning
			 * what we just read.
			 */
			smp_rmb();
			if (PageTail(tail))
				return reserved;
		}
		return PageReserved(tail);
	}

	return true;
}
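
/*
 * Release a pfn obtained by vaddr_get_pfn().  Struct-page-backed pfns are
 * dirtied (if mapped writable) and unpinned; the return value of 1 lets the
 * caller credit the page against locked memory accounting.  Reserved/invalid
 * pfns (e.g. MMIO) were never pinned, so return 0.
 */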
static int put_pfn(unsigned long pfn, int prot)
{
	if (!is_invalid_reserved_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (prot & IOMMU_WRITE)
			SetPageDirty(page);
		put_page(page);
		return 1;
	}
	return 0;
}

/* Unmap DMA region */
static long __vfio_dma_do_unmap(struct vfio_iommu *iommu, dma_addr_t iova,
				long npage, int prot)
{
	long i, unlocked = 0;

	for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
		unsigned long pfn;

		pfn = iommu_iova_to_phys(iommu->domain, iova) >> PAGE_SHIFT;
		if (pfn) {
			iommu_unmap(iommu->domain, iova, PAGE_SIZE);
			unlocked += put_pfn(pfn, prot);
		}
	}
	return unlocked;
}

static void vfio_dma_unmap(struct vfio_iommu *iommu, dma_addr_t iova,
			   long npage, int prot)
{
	long unlocked;

	unlocked = __vfio_dma_do_unmap(iommu, iova, npage, prot);
	vfio_lock_acct(-unlocked);
}
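
/*
 * Translate a user virtual address into a pfn.  The fast path pins a normal
 * page with get_user_pages_fast(); if that fails, fall back to walking the
 * vma in case this is a VM_PFNMAP mapping (e.g. another device's mmap'd MMIO),
 * which is accepted only when the pfn is reserved/invalid and needs no pin.
 */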
static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
{
	struct page *page[1];
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	if (get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE), page) == 1) {
		*pfn = page_to_pfn(page[0]);
		return 0;
	}

	down_read(&current->mm->mmap_sem);

	vma = find_vma_intersection(current->mm, vaddr, vaddr + 1);
	if (vma && vma->vm_flags & VM_PFNMAP) {
		*pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		if (is_invalid_reserved_pfn(*pfn))
			ret = 0;
	}

	up_read(&current->mm->mmap_sem);

	return ret;
}
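
/*
 * Map DMA region: verify the IOVA range is unused, then pin and map one page
 * at a time through the IOMMU API, unwinding everything done so far on any
 * failure.  On success the number of actually pinned (non-reserved) pages is
 * charged to locked memory accounting.
 */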
static int __vfio_dma_map(struct vfio_iommu *iommu, dma_addr_t iova,
			  unsigned long vaddr, long npage, int prot)
{
	dma_addr_t start = iova;
	long i, locked = 0;
	int ret;

	/* Verify that pages are not already mapped */
	for (i = 0; i < npage; i++, iova += PAGE_SIZE)
		if (iommu_iova_to_phys(iommu->domain, iova))
			return -EBUSY;

	iova = start;

	if (iommu->cache)
		prot |= IOMMU_CACHE;

	/*
	 * XXX We break mappings into pages and use get_user_pages_fast to
	 * pin the pages in memory.  It's been suggested that mlock might
	 * provide a more efficient mechanism, but nothing prevents the
	 * user from munlocking the pages, which could then allow the user
	 * access to random host memory.  We also have no guarantee from the
	 * IOMMU API that the iommu driver can unmap sub-pages of previous
	 * mappings.  This means we might lose an entire range if a single
	 * page within it is unmapped.  Single page mappings are inefficient,
	 * but provide the most flexibility for now.
	 */
	for (i = 0; i < npage; i++, iova += PAGE_SIZE, vaddr += PAGE_SIZE) {
		unsigned long pfn = 0;

		ret = vaddr_get_pfn(vaddr, prot, &pfn);
		if (ret) {
			__vfio_dma_do_unmap(iommu, start, i, prot);
			return ret;
		}

		/*
		 * Only add actual locked pages to accounting
		 * XXX We're effectively marking a page locked for every
		 * IOVA page even though it's possible the user could be
		 * backing multiple IOVAs with the same vaddr.  This over-
		 * penalizes the user process, but we currently have no
		 * easy way to do this properly.
		 */
		if (!is_invalid_reserved_pfn(pfn))
			locked++;

		ret = iommu_map(iommu->domain, iova,
				(phys_addr_t)pfn << PAGE_SHIFT,
				PAGE_SIZE, prot);
		if (ret) {
			/* Back out mappings on error */
			put_pfn(pfn, prot);
			__vfio_dma_do_unmap(iommu, start, i, prot);
			return ret;
		}
	}
	vfio_lock_acct(locked);
	return 0;
}
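
/*
 * Test whether [start1, start1 + size1) intersects [start2, start2 + size2).
 * For example, start1 = 0x1000/size1 = 0x2000 overlaps start2 = 0x2000/
 * size2 = 0x1000, but not start2 = 0x3000.  Equal starts only overlap when
 * both sizes are non-zero.
 */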
static inline bool ranges_overlap(dma_addr_t start1, size_t size1,
				  dma_addr_t start2, size_t size2)
{
	if (start1 < start2)
		return (start2 - start1 < size1);
	else if (start2 < start1)
		return (start1 - start2 < size2);
	return (size1 > 0 && size2 > 0);
}

static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
				      dma_addr_t start, size_t size)
{
	struct vfio_dma *dma;

	list_for_each_entry(dma, &iommu->dma_list, next) {
		if (ranges_overlap(dma->iova, NPAGE_TO_SIZE(dma->npage),
				   start, size))
			return dma;
	}
	return NULL;
}
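
/*
 * Remove the portion of an existing vfio_dma that overlaps [start, start +
 * size).  Four cases: the range covers the entry entirely (drop it), clips
 * its low end, clips its high end, or punches a hole in the middle (which
 * splits the entry in two).  Returns the number of pages removed, or a
 * negative errno.
 */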
static long vfio_remove_dma_overlap(struct vfio_iommu *iommu, dma_addr_t start,
				    size_t size, struct vfio_dma *dma)
{
	struct vfio_dma *split;
	long npage_lo, npage_hi;

	/* Existing dma region is completely covered, unmap all */
	if (start <= dma->iova &&
	    start + size >= dma->iova + NPAGE_TO_SIZE(dma->npage)) {
		vfio_dma_unmap(iommu, dma->iova, dma->npage, dma->prot);
		list_del(&dma->next);
		npage_lo = dma->npage;
		kfree(dma);
		return npage_lo;
	}

	/* Overlap low address of existing range */
	if (start <= dma->iova) {
		size_t overlap;

		overlap = start + size - dma->iova;
		npage_lo = overlap >> PAGE_SHIFT;

		vfio_dma_unmap(iommu, dma->iova, npage_lo, dma->prot);
		dma->iova += overlap;
		dma->vaddr += overlap;
		dma->npage -= npage_lo;
		return npage_lo;
	}

	/* Overlap high address of existing range */
	if (start + size >= dma->iova + NPAGE_TO_SIZE(dma->npage)) {
		size_t overlap;

		overlap = dma->iova + NPAGE_TO_SIZE(dma->npage) - start;
		npage_hi = overlap >> PAGE_SHIFT;

		vfio_dma_unmap(iommu, start, npage_hi, dma->prot);
		dma->npage -= npage_hi;
		return npage_hi;
	}

	/* Split existing */
	npage_lo = (start - dma->iova) >> PAGE_SHIFT;
	npage_hi = dma->npage - (size >> PAGE_SHIFT) - npage_lo;

	split = kzalloc(sizeof *split, GFP_KERNEL);
	if (!split)
		return -ENOMEM;

	vfio_dma_unmap(iommu, start, size >> PAGE_SHIFT, dma->prot);

	dma->npage = npage_lo;

	split->npage = npage_hi;
	split->iova = start + size;
	split->vaddr = dma->vaddr + NPAGE_TO_SIZE(npage_lo) + size;
	split->prot = dma->prot;
	list_add(&split->next, &iommu->dma_list);
	return size >> PAGE_SHIFT;
}
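
/*
 * Handle VFIO_IOMMU_UNMAP_DMA: check alignment against the IOMMU's minimum
 * page size, then walk the dma_list trimming every tracked mapping that
 * overlaps the requested range until the requested page count is gone.
 */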
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
			     struct vfio_iommu_type1_dma_unmap *unmap)
{
	long ret = 0, npage = unmap->size >> PAGE_SHIFT;
	struct vfio_dma *dma, *tmp;
	uint64_t mask;

	mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1;

	if (unmap->iova & mask)
		return -EINVAL;
	if (unmap->size & mask)
		return -EINVAL;

	/* XXX We still break these down into PAGE_SIZE */
	WARN_ON(mask & PAGE_MASK);

	mutex_lock(&iommu->lock);

	list_for_each_entry_safe(dma, tmp, &iommu->dma_list, next) {
		if (ranges_overlap(dma->iova, NPAGE_TO_SIZE(dma->npage),
				   unmap->iova, unmap->size)) {
			ret = vfio_remove_dma_overlap(iommu, unmap->iova,
						      unmap->size, dma);
			if (ret > 0)
				npage -= ret;
			if (ret < 0 || npage == 0)
				break;
		}
	}
	mutex_unlock(&iommu->lock);
	return ret > 0 ? 0 : (int)ret;
}
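
/*
 * Handle VFIO_IOMMU_MAP_DMA: validate flags, alignment and wrap-around,
 * enforce RLIMIT_MEMLOCK, pin and map the range, then merge the new entry
 * with any abutting entries of identical protection so the dma_list stays
 * compact.
 */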
static int vfio_dma_do_map(struct vfio_iommu *iommu,
			   struct vfio_iommu_type1_dma_map *map)
{
	struct vfio_dma *dma, *pdma = NULL;
	dma_addr_t iova = map->iova;
	unsigned long locked, lock_limit, vaddr = map->vaddr;
	size_t size = map->size;
	int ret = 0, prot = 0;
	uint64_t mask;
	long npage;

	mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1;

	/* READ/WRITE from device perspective */
	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
		prot |= IOMMU_WRITE;
	if (map->flags & VFIO_DMA_MAP_FLAG_READ)
		prot |= IOMMU_READ;

	if (!prot)
		return -EINVAL; /* No READ/WRITE? */

	if (vaddr & mask || iova & mask || size & mask)
		return -EINVAL;

	/* XXX We still break these down into PAGE_SIZE */
	WARN_ON(mask & PAGE_MASK);

	/* Don't allow IOVA wrap */
	if (iova + size && iova + size < iova)
		return -EINVAL;

	/* Don't allow virtual address wrap */
	if (vaddr + size && vaddr + size < vaddr)
		return -EINVAL;

	npage = size >> PAGE_SHIFT;
	if (!npage)
		return -EINVAL;

	mutex_lock(&iommu->lock);

	if (vfio_find_dma(iommu, iova, size)) {
		ret = -EBUSY;
		goto out_lock;
	}

	/* account for locked pages */
	locked = current->mm->locked_vm + npage;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
			__func__, rlimit(RLIMIT_MEMLOCK));
		ret = -ENOMEM;
		goto out_lock;
	}

	ret = __vfio_dma_map(iommu, iova, vaddr, npage, prot);
	if (ret)
		goto out_lock;

	/* Check if we abut a region below - nothing below 0 */
	if (iova) {
		dma = vfio_find_dma(iommu, iova - 1, 1);
		if (dma && dma->prot == prot &&
		    dma->vaddr + NPAGE_TO_SIZE(dma->npage) == vaddr) {
			dma->npage += npage;
			iova = dma->iova;
			vaddr = dma->vaddr;
			npage = dma->npage;
			size = NPAGE_TO_SIZE(npage);

			pdma = dma;
		}
	}

	/* Check if we abut a region above - nothing above ~0 + 1 */
	if (iova + size) {
		dma = vfio_find_dma(iommu, iova + size, 1);
		if (dma && dma->prot == prot &&
		    dma->vaddr == vaddr + size) {
			dma->npage += npage;
			dma->iova = iova;
			dma->vaddr = vaddr;

			/*
			 * If merged above and below, remove previously
			 * merged entry.  New entry covers it.
			 */
			if (pdma) {
				list_del(&pdma->next);
				kfree(pdma);
			}
			pdma = dma;
		}
	}

	/* Isolated, new region */
	if (!pdma) {
		dma = kzalloc(sizeof *dma, GFP_KERNEL);
		if (!dma) {
			ret = -ENOMEM;
			vfio_dma_unmap(iommu, iova, npage, prot);
			goto out_lock;
		}

		dma->npage = npage;
		dma->iova = iova;
		dma->vaddr = vaddr;
		dma->prot = prot;
		list_add(&dma->next, &iommu->dma_list);
	}

out_lock:
	mutex_unlock(&iommu->lock);
	return ret;
}
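
/*
 * Attach an IOMMU group to this container's single domain, rejecting
 * duplicate attachments.  All groups in the container share one set of
 * DMA mappings.
 */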
static int vfio_iommu_type1_attach_group(void *iommu_data,
					 struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_group *group, *tmp;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return -ENOMEM;

	mutex_lock(&iommu->lock);

	list_for_each_entry(tmp, &iommu->group_list, next) {
		if (tmp->iommu_group == iommu_group) {
			mutex_unlock(&iommu->lock);
			kfree(group);
			return -EINVAL;
		}
	}

	/*
	 * TODO: The domain has capabilities that might change as we add
	 * groups (see iommu->cache, currently never set).  Check for
	 * them and potentially disallow groups to be attached when it
	 * would change capabilities (ugh).
	 */
	ret = iommu_attach_group(iommu->domain, iommu_group);
	if (ret) {
		mutex_unlock(&iommu->lock);
		kfree(group);
		return ret;
	}

	group->iommu_group = iommu_group;
	list_add(&group->next, &iommu->group_list);

	mutex_unlock(&iommu->lock);

	return 0;
}

static void vfio_iommu_type1_detach_group(void *iommu_data,
					  struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_group *group;

	mutex_lock(&iommu->lock);

	list_for_each_entry(group, &iommu->group_list, next) {
		if (group->iommu_group == iommu_group) {
			iommu_detach_group(iommu->domain, iommu_group);
			list_del(&group->next);
			kfree(group);
			break;
		}
	}

	mutex_unlock(&iommu->lock);
}
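
/*
 * Create the Type1 backend for a container: allocate the vfio_iommu
 * bookkeeping and a single IOMMU domain on the PCI bus.  Unless the
 * allow_unsafe_interrupts module parameter is set, refuse to run on
 * hardware without interrupt remapping, since interrupt isolation
 * cannot be guaranteed there.
 */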
static void *vfio_iommu_type1_open(unsigned long arg)
{
	struct vfio_iommu *iommu;

	if (arg != VFIO_TYPE1_IOMMU)
		return ERR_PTR(-EINVAL);

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&iommu->group_list);
	INIT_LIST_HEAD(&iommu->dma_list);
	mutex_init(&iommu->lock);

	/*
	 * Wish we didn't have to know about bus_type here.
	 */
	iommu->domain = iommu_domain_alloc(&pci_bus_type);
	if (!iommu->domain) {
		kfree(iommu);
		return ERR_PTR(-EIO);
	}

	/*
	 * Wish we could specify required capabilities rather than create
	 * a domain, see what comes out and hope it doesn't change along
	 * the way.  Fortunately we know interrupt remapping is global for
	 * our iommus.
	 */
	if (!allow_unsafe_interrupts &&
	    !iommu_domain_has_cap(iommu->domain, IOMMU_CAP_INTR_REMAP)) {
		pr_warn("%s: No interrupt remapping support.  Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
			__func__);
		iommu_domain_free(iommu->domain);
		kfree(iommu);
		return ERR_PTR(-EPERM);
	}

	return iommu;
}
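
/*
 * Container teardown: detach every group, unmap and unpin every tracked DMA
 * range, then free the domain and the per-container state.
 */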
static void vfio_iommu_type1_release(void *iommu_data)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_group *group, *group_tmp;
	struct vfio_dma *dma, *dma_tmp;

	list_for_each_entry_safe(group, group_tmp, &iommu->group_list, next) {
		iommu_detach_group(iommu->domain, group->iommu_group);
		list_del(&group->next);
		kfree(group);
	}

	list_for_each_entry_safe(dma, dma_tmp, &iommu->dma_list, next) {
		vfio_dma_unmap(iommu, dma->iova, dma->npage, dma->prot);
		list_del(&dma->next);
		kfree(dma);
	}

	iommu_domain_free(iommu->domain);
	iommu->domain = NULL;
	kfree(iommu);
}
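
/*
 * ioctl dispatch for the container fd once this backend is selected:
 * VFIO_CHECK_EXTENSION reports Type1 support, VFIO_IOMMU_GET_INFO returns
 * the supported IOVA page sizes, and VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA
 * feed vfio_dma_do_map()/vfio_dma_do_unmap() above.
 */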
static long vfio_iommu_type1_ioctl(void *iommu_data,
				   unsigned int cmd, unsigned long arg)
{
	struct vfio_iommu *iommu = iommu_data;
	unsigned long minsz;

	if (cmd == VFIO_CHECK_EXTENSION) {
		switch (arg) {
		case VFIO_TYPE1_IOMMU:
			return 1;
		default:
			return 0;
		}
	} else if (cmd == VFIO_IOMMU_GET_INFO) {
		struct vfio_iommu_type1_info info;

		minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = 0;
		info.iova_pgsizes = iommu->domain->ops->pgsize_bitmap;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_IOMMU_MAP_DMA) {
		struct vfio_iommu_type1_dma_map map;
		uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&map, (void __user *)arg, minsz))
			return -EFAULT;

		if (map.argsz < minsz || map.flags & ~mask)
			return -EINVAL;

		return vfio_dma_do_map(iommu, &map);

	} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
		struct vfio_iommu_type1_dma_unmap unmap;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);

		if (copy_from_user(&unmap, (void __user *)arg, minsz))
			return -EFAULT;

		if (unmap.argsz < minsz || unmap.flags)
			return -EINVAL;

		return vfio_dma_do_unmap(iommu, &unmap);
	}

	return -ENOTTY;
}

static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
	.name		= "vfio-iommu-type1",
	.owner		= THIS_MODULE,
	.open		= vfio_iommu_type1_open,
	.release	= vfio_iommu_type1_release,
	.ioctl		= vfio_iommu_type1_ioctl,
	.attach_group	= vfio_iommu_type1_attach_group,
	.detach_group	= vfio_iommu_type1_detach_group,
};

static int __init vfio_iommu_type1_init(void)
{
	if (!iommu_present(&pci_bus_type))
		return -ENODEV;

	return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
}

static void __exit vfio_iommu_type1_cleanup(void)
{
	vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1);
}

module_init(vfio_iommu_type1_init);
module_exit(vfio_iommu_type1_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);