/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>

#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "aik@ozlabs.ru"
#define DRIVER_DESC     "VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);

static long try_increment_locked_vm(struct mm_struct *mm, long npages)
{
	long ret = 0, locked, lock_limit;

	if (WARN_ON_ONCE(!mm))
		return -EPERM;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);
	locked = mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		ret = -ENOMEM;
	else
		mm->locked_vm += npages;

	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&mm->mmap_sem);

	return ret;
}

static void decrement_locked_vm(struct mm_struct *mm, long npages)
{
	if (!mm || !npages)
		return;

	down_write(&mm->mmap_sem);
	if (WARN_ON_ONCE(npages > mm->locked_vm))
		npages = mm->locked_vm;
	mm->locked_vm -= npages;
	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);
}

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */
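
/*
 * A rough sketch of the expected userspace sequence, illustrative only
 * (error handling omitted; see the VFIO documentation for the
 * authoritative flow):
 *
 *	container = open("/dev/vfio/vfio", O_RDWR);
 *	group = open("/dev/vfio/<group>", O_RDWR);
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
 *	ioctl(container, VFIO_IOMMU_ENABLE);
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 *
 * The v2 flow (VFIO_SPAPR_TCE_v2_IOMMU) replaces VFIO_IOMMU_ENABLE with
 * memory preregistration, see tce_iommu_register_pages() below.
 */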

struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};

/*
 * A container needs to remember which preregistered region it has
 * referenced to do proper cleanup at the userspace process exit.
 */
struct tce_iommu_prereg {
	struct list_head next;
	struct mm_iommu_table_group_mem_t *mem;
};

/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	bool def_window_pending;
	unsigned long locked_pages;
	struct mm_struct *mm;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
	struct list_head prereg_list;
};

static long tce_iommu_mm_set(struct tce_container *container)
{
	if (container->mm) {
		if (container->mm == current->mm)
			return 0;
		return -EPERM;
	}
	BUG_ON(!current->mm);
	container->mm = current->mm;
	atomic_inc(&container->mm->mm_count);

	return 0;
}

static long tce_iommu_prereg_free(struct tce_container *container,
		struct tce_iommu_prereg *tcemem)
{
	long ret;

	ret = mm_iommu_put(container->mm, tcemem->mem);
	if (ret)
		return ret;

	list_del(&tcemem->next);
	kfree(tcemem);

	return 0;
}

static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;
	struct tce_iommu_prereg *tcemem;
	bool found = false;
	long ret;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	list_for_each_entry(tcemem, &container->prereg_list, next) {
		if (tcemem->mem == mem) {
			found = true;
			break;
		}
	}

	if (!found)
		ret = -ENOENT;
	else
		ret = tce_iommu_prereg_free(container, tcemem);

	mm_iommu_put(container->mm, mem);

	return ret;
}

static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	struct tce_iommu_prereg *tcemem;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	mem = mm_iommu_get(container->mm, vaddr, entries);
	if (mem) {
		list_for_each_entry(tcemem, &container->prereg_list, next) {
			if (tcemem->mem == mem) {
				ret = -EBUSY;
				goto put_exit;
			}
		}
	} else {
		ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
		if (ret)
			return ret;
	}

	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
	if (!tcemem) {
		ret = -ENOMEM;
		goto put_exit;
	}

	tcemem->mem = mem;
	list_add(&tcemem->next, &container->prereg_list);

	container->enabled = true;

	return 0;

put_exit:
	mm_iommu_put(container->mm, mem);
	return ret;
}
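
/*
 * Illustrative v2 preregistration sketch from userspace (not part of this
 * file): the RAM that will back DMA mappings is registered once, pinned and
 * accounted up front, so the map/unmap hot path (including the real-mode
 * H_PUT_TCE handlers) can translate userspace addresses without pinning
 * pages again:
 *
 *	memreg.vaddr = (__u64)(uintptr_t)buf;	(page aligned)
 *	memreg.size = bufsize;			(page aligned)
 *	ioctl(container, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &memreg);
 *	...map/unmap DMA within the registered region...
 *	ioctl(container, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &memreg);
 */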

static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
		unsigned int page_shift)
{
	struct page *page;
	unsigned long size = 0;

	if (mm_iommu_is_devmem(mm, hpa, page_shift, &size))
		return size == (1UL << page_shift);

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}
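
/*
 * Example of the containment check above, with illustrative numbers: with
 * 64K system pages (PAGE_SHIFT == 16) and a 16MB TCE page
 * (page_shift == 24), the backing page must be a compound page of order
 * >= 8 so that the whole 16MB IOMMU page points at memory the user
 * actually owns.
 */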

static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}

static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}
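
/*
 * For example (illustrative values): with a window of 64K IOMMU pages
 * (it_page_shift == 16), it_offset == 0 and it_size == 0x8000 entries,
 * ioba 0x30000 falls into entry 3 and the window covers the iova range
 * [0, 0x80000000).
 */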

static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
			return i;
	}

	return -ENOSPC;
}

static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap. For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult or impossible
	 * to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled. The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits;
	 * that would effectively kill the guest at random points, so it is
	 * much better to enforce the limit based on the max that the guest
	 * can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has. I.e. for a 4GB guest and 4 IOMMU groups,
	 * each with a 2GB DMA window, 8GB will be counted here. The reason for
	 * this is that we cannot tell here the amount of RAM used by the guest
	 * as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	ret = tce_iommu_mm_set(container);
	if (ret)
		return ret;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = try_increment_locked_vm(container->mm, locked);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}

static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	BUG_ON(!container->mm);
	decrement_locked_vm(container->mm, container->locked_pages);
}

static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);
	INIT_LIST_HEAD_RCU(&container->prereg_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl);

static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	struct tce_iommu_prereg *tcemem, *tmtmp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(container, tbl);
	}

	list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
		WARN_ON(tce_iommu_prereg_free(container, tcemem));

	tce_iommu_disable(container);
	if (container->mm)
		mmdrop(container->mm);
	mutex_destroy(&container->lock);

	kfree(container);
}

static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	put_page(page);
}

static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
		unsigned long tce, unsigned long shift,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}

static void tce_iommu_unuse_page_v2(struct tce_container *container,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
			tbl->it_page_shift, &hpa, &mem);
	if (ret)
		pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
				__func__, be64_to_cpu(*pua), entry, ret);
	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;
	unsigned long lastentry = entry + pages;

	for ( ; entry < lastentry; ++entry) {
		if (tbl->it_indirect_levels && tbl->it_userspace) {
			/*
			 * For multilevel tables, we can take a shortcut here
			 * and skip some TCEs as we know that the userspace
			 * addresses cache is a mirror of the real TCE table
			 * and if it is missing some indirect levels, then
			 * the hardware table does not have them allocated
			 * either and therefore does not require updating.
			 */
			__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl,
					entry);
			if (!pua) {
				/* align to level_size which is power of two */
				entry |= tbl->it_level_size - 1;
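				/*
				 * E.g. with it_level_size == 512 and
				 * entry == 0x1234 (illustrative values),
				 * entry becomes 0x13ff and the loop's
				 * ++entry resumes at 0x1400, skipping the
				 * rest of the unallocated level.
				 */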
				continue;
			}
		}

		cond_resched();

		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg(container->mm, tbl, entry, &oldhpa,
				&direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(container, tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	return 0;
}

static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE ? FOLL_WRITE : 0,
			&page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}

static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		if (!tce_page_is_contained(container->mm, hpa,
				tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
				&dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(container,
				tce, tbl->it_page_shift, &hpa, &mem);
		if (ret)
			break;

		if (!tce_page_is_contained(container->mm, hpa,
				tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
				&dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(container, tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(container, tbl, entry + i);

		*pua = cpu_to_be64(tce);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

static long tce_iommu_create_table(struct tce_container *container,
			struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size > table_size));

	return ret;
}

static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	iommu_tce_table_put(tbl);
	decrement_locked_vm(container->mm, pages);
}

static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;

	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;

	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	return 0;

unset_exit:
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(container, tbl);

	return ret;
}
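
/*
 * Userspace drives this via VFIO_IOMMU_SPAPR_TCE_CREATE (v2 only); a hedged
 * example requesting a 1GB window of 64K pages with a single TCE level
 * (illustrative values):
 *
 *	create.page_shift = 16;
 *	create.window_size = 1ULL << 30;
 *	create.levels = 1;
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
 *
 * On success, create.start_addr holds the bus address of the new window.
 */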

static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	int num;

	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;

	BUG_ON(!tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * the userspace to remove this window, some do not so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;

		table_group->ops->unset_window(table_group, num);
	}

	/* Free table */
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(container, tbl);
	container->tables[num] = NULL;

	return 0;
}

static long tce_iommu_create_default_window(struct tce_container *container)
{
	long ret;
	__u64 start_addr = 0;
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;

	if (!container->def_window_pending)
		return 0;

	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
			table_group->tce32_size, 1, &start_addr);
	WARN_ON_ONCE(!ret && start_addr);

	if (!ret)
		container->def_window_pending = false;

	return ret;
}

static long tce_iommu_ioctl(void *iommu_data,
				 unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;
	}

	/*
	 * Sanity check to prevent one userspace from manipulating
	 * another userspace mm.
	 */
	BUG_ON(!container);
	if (container->mm && container->mm != current->mm)
		return -EPERM;

	switch (cmd) {
	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		if (!container->mm)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
					cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		if (create.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_create_default_window(container);
		if (!ret)
			ret = tce_iommu_create_window(container,
					create.page_shift,
					create.window_size, create.levels,
					&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (remove.flags)
			return -EINVAL;

		if (container->def_window_pending && !remove.start_addr) {
			container->def_window_pending = false;
			return 0;
		}

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}

static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}

static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = iommu_take_ownership(tbl);
		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}

static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		if (container->tables[i])
			table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
}

static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i, ret = 0;

	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	/* Set all windows to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		ret = table_group->ops->set_window(table_group, i, tbl);
		if (ret)
			goto release_exit;
	}

	return 0;

release_exit:
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);

	return ret;
}

static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);
	if (!table_group) {
		ret = -ENODEV;
		goto unlock_exit;
	}

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* Check if new group has the same iommu_ops (i.e. compatible) */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership) {
		if (container->v2) {
			ret = -EPERM;
			goto unlock_exit;
		}
		ret = tce_iommu_take_ownership(container, table_group);
	} else {
		if (!container->v2) {
			ret = -EPERM;
			goto unlock_exit;
		}
		ret = tce_iommu_take_ownership_ddw(container, table_group);
		if (!tce_groups_attached(container) && !container->tables[0])
			container->def_window_pending = true;
	}

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

unlock_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

	mutex_unlock(&container->lock);

	return ret;
}

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}

static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);