// SPDX-License-Identifier: GPL-2.0+
/*
 * NVIDIA Tegra Video decoder driver
 *
 * Copyright (C) 2016-2019 GRATE-DRIVER project
 */
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "vde.h"
19 int tegra_vde_iommu_map(struct tegra_vde *vde,
29 end = vde->domain->geometry.aperture_end;
30 size = iova_align(&vde->iova, size);
31 shift = iova_shift(&vde->iova);
33 iova = alloc_iova(&vde->iova, size >> shift, end >> shift, true);
37 addr = iova_dma_addr(&vde->iova, iova);
39 size = iommu_map_sgtable(vde->domain, addr, sgt,
40 IOMMU_READ | IOMMU_WRITE);
42 __free_iova(&vde->iova, iova);
51 void tegra_vde_iommu_unmap(struct tegra_vde *vde, struct iova *iova)
53 unsigned long shift = iova_shift(&vde->iova);
54 unsigned long size = iova_size(iova) << shift;
55 dma_addr_t addr = iova_dma_addr(&vde->iova, iova);
57 iommu_unmap(vde->domain, addr, size);
58 __free_iova(&vde->iova, iova);
61 int tegra_vde_iommu_init(struct tegra_vde *vde)
63 struct device *dev = vde->dev;
69 vde->group = iommu_group_get(dev);
73 #if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
74 if (dev->archdata.mapping) {
75 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
77 arm_iommu_detach_device(dev);
78 arm_iommu_release_mapping(mapping);
81 vde->domain = iommu_paging_domain_alloc(dev);
82 if (IS_ERR(vde->domain)) {
83 err = PTR_ERR(vde->domain);
88 err = iova_cache_get();
92 order = __ffs(vde->domain->pgsize_bitmap);
93 init_iova_domain(&vde->iova, 1UL << order, 0);
95 err = iommu_attach_group(vde->domain, vde->group);
100 * We're using some static addresses that are not accessible by VDE
101 * to trap invalid memory accesses.
103 shift = iova_shift(&vde->iova);
104 iova = reserve_iova(&vde->iova, 0x60000000 >> shift,
105 0x70000000 >> shift);
111 vde->iova_resv_static_addresses = iova;
114 * BSEV's end-address wraps around due to integer overflow during
115 * of hardware context preparation if IOVA is allocated at the end
116 * of address space and VDE can't handle that. Hence simply reserve
117 * the last page to avoid the problem.
119 iova = reserve_iova(&vde->iova, 0xffffffff >> shift,
120 (0xffffffff >> shift) + 1);
126 vde->iova_resv_last_page = iova;
131 __free_iova(&vde->iova, vde->iova_resv_static_addresses);
133 iommu_detach_group(vde->domain, vde->group);
135 put_iova_domain(&vde->iova);
138 iommu_domain_free(vde->domain);
140 iommu_group_put(vde->group);
145 void tegra_vde_iommu_deinit(struct tegra_vde *vde)
148 __free_iova(&vde->iova, vde->iova_resv_last_page);
149 __free_iova(&vde->iova, vde->iova_resv_static_addresses);
150 iommu_detach_group(vde->domain, vde->group);
151 put_iova_domain(&vde->iova);
153 iommu_domain_free(vde->domain);
154 iommu_group_put(vde->group);