/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

static int swiotlb __ro_after_init;

static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

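/*
 * Small pool of pre-allocated, non-cacheable memory used to satisfy
 * atomic (non-blocking) allocations from non-coherent devices, since we
 * cannot remap pages in atomic context. Sized via the "coherent_pool"
 * early parameter, defaulting to 256 KiB.
 */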
static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

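/*
 * Carve an allocation out of the atomic pool. On success, *ret_page is
 * set to the first backing page and the returned pointer refers to the
 * pool's non-cacheable mapping of the (zeroed) buffer.
 */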
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

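/*
 * Low-level coherent allocation: prefer the CMA area when one exists
 * and blocking is allowed, otherwise fall back to swiotlb. This returns
 * the kernel linear-map alias of the buffer; any non-cacheable
 * remapping is the caller's job.
 */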
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  unsigned long attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
		struct page *page;
		void *addr;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size), flags);
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
		return addr;
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				unsigned long attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	freed = dma_release_from_contiguous(dev,
					phys_to_page(paddr),
					size >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

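/*
 * The .alloc hook for swiotlb_dma_ops. Non-coherent devices in atomic
 * context are served from the atomic pool; otherwise we allocate via
 * __dma_alloc_coherent() and, for non-coherent devices, hand back a
 * non-cacheable remapped alias of the buffer.
 */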
static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 unsigned long attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	size = PAGE_ALIGN(size);

	if (!coherent && !gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_area(ptr, size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot, NULL);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = DMA_ERROR_CODE;
	return NULL;
}

static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       unsigned long attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	size = PAGE_ALIGN(size);

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

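/*
 * The streaming DMA hooks below wrap the generic swiotlb operations,
 * adding the cache maintenance that non-coherent devices require
 * (unless the caller passed DMA_ATTR_SKIP_CPU_SYNC).
 */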
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  unsigned long attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t handle, size_t size,
				 unsigned long attrs)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
			    PAGE_ALIGN(size), 0);

	return ret;
}

static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(hwdev, mask);
	return 1;
}

static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
{
	if (swiotlb)
		return swiotlb_dma_mapping_error(hwdev, addr);
	return 0;
}

static const struct dma_map_ops swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.get_sgtable = __swiotlb_get_sgtable,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = __swiotlb_dma_supported,
	.mapping_error = __swiotlb_dma_mapping_error,
};

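/*
 * Set up the atomic pool at boot: allocate the backing pages (from CMA
 * if available, GFP_DMA otherwise), zero and flush them, then hand the
 * resulting non-cacheable remapping to a genpool allocator.
 */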
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, GFP_KERNEL);
	else
		page = alloc_pages(GFP_DMA, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);
		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
			struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	return DMA_ERROR_CODE;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nelems, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_sg(struct device *dev,
			     struct scatterlist *sgl, int nelems,
			     enum dma_data_direction dir,
			     unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
				dma_addr_t dev_addr, size_t size,
				enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
			    struct scatterlist *sgl, int nelems,
			    enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
	return 0;
}

const struct dma_map_ops dummy_dma_ops = {
	.alloc = __dummy_alloc,
	.free = __dummy_free,
	.mmap = __dummy_mmap,
	.map_page = __dummy_map_page,
	.unmap_page = __dummy_unmap_page,
	.map_sg = __dummy_map_sg,
	.unmap_sg = __dummy_unmap_sg,
	.sync_single_for_cpu = __dummy_sync_single,
	.sync_single_for_device = __dummy_sync_single,
	.sync_sg_for_cpu = __dummy_sync_sg,
	.sync_sg_for_device = __dummy_sync_sg,
	.mapping_error = __dummy_mapping_error,
	.dma_supported = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

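/*
 * swiotlb bounce buffering is only needed when it is forced on the
 * command line or when some RAM lies beyond arm64_dma_phys_limit.
 */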
static int __init arm64_dma_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb = 1;

	return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_area(virt, PAGE_SIZE);
}

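/*
 * The IOMMU-backed .alloc hook. Blocking allocations go through
 * iommu_dma_alloc() and get a remapped, possibly scattered, set of
 * pages; atomic allocations must be physically contiguous, coming from
 * either the page allocator (coherent devices) or the atomic pool
 * (non-coherent devices).
 */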
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (gfpflags_allow_blocking(gfp)) {
		struct page **pages;
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	} else {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = __alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				__free_from_pool(addr, size);
			addr = NULL;
		}
	}
	return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 3 things depending on how it was allocated:
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_from_pool(cpu_addr, size);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}

static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_map_area(phys_to_virt(phys), size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!iommu_dma_mapping_error(dev, dev_addr) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_device(dev, dev_addr, size, dir);

	return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(sg_virt(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
	.mapping_error = iommu_dma_mapping_error,
};

/*
 * TODO: Right now __iommu_setup_dma_ops() gets called too early to do
 * everything it needs to - the device is only partially created and the
 * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we
 * need this delayed attachment dance. Once IOMMU probe ordering is sorted
 * to move the arch_setup_dma_ops() call later, all the notifier bits below
 * become unnecessary, and will go away.
 */
struct iommu_dma_notifier_data {
	struct list_head list;
	struct device *dev;
	const struct iommu_ops *ops;
	u64 dma_base;
	u64 size;
};
static LIST_HEAD(iommu_dma_masters);
static DEFINE_MUTEX(iommu_dma_notifier_lock);

static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
			    u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	/*
	 * If the IOMMU driver has the DMA domain support that we require,
	 * then the IOMMU core will have already configured a group for this
	 * device, and allocated the default domain for that group.
	 */
	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;

		dev->dma_ops = &iommu_dma_ops;
	}

	return true;

out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
	return false;
}

static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
			       u64 dma_base, u64 size)
{
	struct iommu_dma_notifier_data *iommudata;

	iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
	if (!iommudata)
		return;

	iommudata->dev = dev;
	iommudata->ops = ops;
	iommudata->dma_base = dma_base;
	iommudata->size = size;

	mutex_lock(&iommu_dma_notifier_lock);
	list_add(&iommudata->list, &iommu_dma_masters);
	mutex_unlock(&iommu_dma_notifier_lock);
}

static int __iommu_attach_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct iommu_dma_notifier_data *master, *tmp;

	if (action != BUS_NOTIFY_BIND_DRIVER)
		return 0;

	mutex_lock(&iommu_dma_notifier_lock);
	list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
		if (data == master->dev && do_iommu_attach(master->dev,
				master->ops, master->dma_base, master->size)) {
			list_del(&master->list);
			kfree(master);
			break;
		}
	}
	mutex_unlock(&iommu_dma_notifier_lock);
	return 0;
}

static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
{
	struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
	int ret;

	if (!nb)
		return -ENOMEM;

	nb->notifier_call = __iommu_attach_notifier;

	ret = bus_register_notifier(bus, nb);
	if (ret) {
		pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
			bus->name);
		kfree(nb);
	}

	return ret;
}

static int __init __iommu_dma_init(void)
{
	int ret;

	ret = iommu_dma_init();
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&platform_bus_type);
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&amba_bustype);
#ifdef CONFIG_PCI
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&pci_bus_type);
#endif
	return ret;
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_group *group;

	if (!ops)
		return;
	/*
	 * TODO: As a concession to the future, we're ready to handle being
	 * called both early and late (i.e. after bus_add_device). Once all
	 * the platform bus code is reworked to call us late and the notifier
	 * junk above goes away, move the body of do_iommu_attach here.
	 */
	group = iommu_group_get(dev);
	if (group) {
		do_iommu_attach(dev, ops, dma_base, size);
		iommu_group_put(group);
	} else {
		queue_iommu_attach(dev, ops, dma_base, size);
	}
}

void arch_teardown_dma_ops(struct device *dev)
{
	dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif	/* CONFIG_IOMMU_DMA */

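/*
 * Default to the swiotlb-backed ops; if an IOMMU translates this
 * device's DMA, __iommu_setup_dma_ops() installs iommu_dma_ops instead.
 */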
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	if (!dev->dma_ops)
		dev->dma_ops = &swiotlb_dma_ops;

	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
}