/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
        struct list_head        list;
        dma_addr_t              iova;
        phys_addr_t             phys;
};

enum iommu_dma_cookie_type {
        IOMMU_DMA_IOVA_COOKIE,
        IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
        enum iommu_dma_cookie_type      type;
        union {
                /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
                struct iova_domain      iovad;
                /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
                dma_addr_t              msi_iova;
        };
        struct list_head                msi_page_list;
        spinlock_t                      msi_lock;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
        if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
                return cookie->iovad.granule;
        return PAGE_SIZE;
}

static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;

        if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
                return &cookie->iovad;
        return NULL;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
        struct iommu_dma_cookie *cookie;

        cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
        if (cookie) {
                spin_lock_init(&cookie->msi_lock);
                INIT_LIST_HEAD(&cookie->msi_page_list);
                cookie->type = type;
        }
        return cookie;
}

int iommu_dma_init(void)
{
        return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
        if (domain->iova_cookie)
                return -EEXIST;

        domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
        if (!domain->iova_cookie)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

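/*
 * Example (illustrative sketch only): a typical IOMMU driver would acquire
 * the cookie while allocating a DMA domain, roughly along these lines.
 * The my_domain/my_domain_alloc names are hypothetical, not part of this API:
 *
 *      static struct iommu_domain *my_domain_alloc(unsigned type)
 *      {
 *              struct my_domain *dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *
 *              if (!dom)
 *                      return NULL;
 *              if (type == IOMMU_DOMAIN_DMA &&
 *                  iommu_get_dma_cookie(&dom->domain)) {
 *                      kfree(dom);
 *                      return NULL;
 *              }
 *              return &dom->domain;
 *      }
 */
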
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
        struct iommu_dma_cookie *cookie;

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        if (domain->iova_cookie)
                return -EEXIST;

        cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
        if (!cookie)
                return -ENOMEM;

        cookie->msi_iova = base;
        domain->iova_cookie = cookie;
        return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

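/*
 * Example (illustrative sketch only): a user of an unmanaged domain which
 * hands out IOVAs itself (e.g. a VFIO-like driver) might set aside a window
 * for MSI doorbells like this, where MSI_IOVA_BASE and MSI_IOVA_LENGTH are
 * hypothetical constants chosen by that user, and the reserved range is
 * simply never handed out by its own allocator:
 *
 *      dom = iommu_domain_alloc(bus);
 *      if (dom && iommu_get_msi_cookie(dom, MSI_IOVA_BASE))
 *              goto err_free_domain;
 */
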
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi, *tmp;

        if (!cookie)
                return;

        if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
                put_iova_domain(&cookie->iovad);

        list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
                list_del(&msi->list);
                kfree(msi);
        }
        kfree(cookie);
        domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

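/*
 * Example (illustrative sketch only): the counterpart of the domain_alloc
 * sketch above, releasing the cookie before the driver frees its own domain
 * structure (my_domain_free/to_my_domain are hypothetical names):
 *
 *      static void my_domain_free(struct iommu_domain *domain)
 *      {
 *              struct my_domain *dom = to_my_domain(domain);
 *
 *              iommu_put_dma_cookie(domain);
 *              kfree(dom);
 *      }
 */
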
/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers host
 * bridge windows for PCI devices.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
        struct pci_host_bridge *bridge;
        struct resource_entry *window;

        if (!dev_is_pci(dev))
                return;

        bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
        resource_list_for_each_entry(window, &bridge->windows) {
                struct iommu_resv_region *region;
                phys_addr_t start;
                size_t length;

                if (resource_type(window->res) != IORESOURCE_MEM)
                        continue;

                start = window->res->start - window->offset;
                length = window->res->end - window->res->start + 1;
                region = iommu_alloc_resv_region(start, length, 0,
                                IOMMU_RESV_RESERVED);
                if (!region)
                        return;

                list_add_tail(&region->list, list);
        }
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

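/*
 * Example (illustrative sketch only): a driver's own .get_resv_regions
 * callback would typically add any driver-specific regions first and then
 * hand over to this helper (my_get_resv_regions is a hypothetical name):
 *
 *      static void my_get_resv_regions(struct device *dev,
 *                                      struct list_head *head)
 *      {
 *              iommu_dma_get_resv_regions(dev, head);
 *      }
 */
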
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
                phys_addr_t start, phys_addr_t end)
{
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_dma_msi_page *msi_page;
        int i, num_pages;

        start -= iova_offset(iovad, start);
        num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

        msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
        if (!msi_page)
                return -ENOMEM;

        for (i = 0; i < num_pages; i++) {
                msi_page[i].phys = start;
                msi_page[i].iova = start;
                INIT_LIST_HEAD(&msi_page[i].list);
                list_add(&msi_page[i].list, &cookie->msi_page_list);
                start += iovad->granule;
        }

        return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
                struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_resv_region *region;
        LIST_HEAD(resv_regions);
        int ret = 0;

        iommu_get_resv_regions(dev, &resv_regions);
        list_for_each_entry(region, &resv_regions, list) {
                unsigned long lo, hi;

                /* We ARE the software that manages these! */
                if (region->type == IOMMU_RESV_SW_MSI)
                        continue;

                lo = iova_pfn(iovad, region->start);
                hi = iova_pfn(iovad, region->start + region->length - 1);
                reserve_iova(iovad, lo, hi);

                if (region->type == IOMMU_RESV_MSI)
                        ret = cookie_init_hw_msi_region(cookie, region->start,
                                        region->start + region->length);
                if (ret)
                        break;
        }
        iommu_put_resv_regions(dev, &resv_regions);

        return ret;
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                u64 size, struct device *dev)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        unsigned long order, base_pfn, end_pfn;

        if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
                return -EINVAL;

        /* Use the smallest supported page size for IOVA granularity */
        order = __ffs(domain->pgsize_bitmap);
        base_pfn = max_t(unsigned long, 1, base >> order);
        end_pfn = (base + size - 1) >> order;

        /* Check the domain allows at least some access to the device... */
        if (domain->geometry.force_aperture) {
                if (base > domain->geometry.aperture_end ||
                    base + size <= domain->geometry.aperture_start) {
                        pr_warn("specified DMA range outside IOMMU capability\n");
                        return -EFAULT;
                }
                /* ...then finally give it a kicking to make sure it fits */
                base_pfn = max_t(unsigned long, base_pfn,
                                domain->geometry.aperture_start >> order);
                end_pfn = min_t(unsigned long, end_pfn,
                                domain->geometry.aperture_end >> order);
        }
        /*
         * PCI devices may have larger DMA masks, but still prefer allocating
         * within a 32-bit mask to avoid DAC addressing. Such limitations don't
         * apply to the typical platform device, so for those we may as well
         * leave the cache limit at the top of their range to save an rb_last()
         * traversal on every allocation.
         */
        if (dev && dev_is_pci(dev))
                end_pfn &= DMA_BIT_MASK(32) >> order;

        /* start_pfn is always nonzero for an already-initialised domain */
        if (iovad->start_pfn) {
                if (1UL << order != iovad->granule ||
                    base_pfn != iovad->start_pfn) {
                        pr_warn("Incompatible range for DMA domain\n");
                        return -EFAULT;
                }
                /*
                 * If we have devices with different DMA masks, move the free
                 * area cache limit down for the benefit of the smaller one.
                 */
                iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);

                return 0;
        }

        init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
        if (!dev)
                return 0;

        return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);

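/*
 * Example (illustrative sketch only): arch code wiring up a device's DMA ops
 * would typically initialise the domain with the window it intends to use,
 * based on the device's dma_base and size (my_setup_dma_ops is a
 * hypothetical name for such a hook):
 *
 *      static bool my_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
 *      {
 *              struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *
 *              if (!domain || domain->type != IOMMU_DOMAIN_DMA)
 *                      return false;
 *              return !iommu_dma_init_domain(domain, dma_base, size, dev);
 *      }
 */
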
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
                     unsigned long attrs)
{
        int prot = coherent ? IOMMU_CACHE : 0;

        if (attrs & DMA_ATTR_PRIVILEGED)
                prot |= IOMMU_PRIV;

        switch (dir) {
        case DMA_BIDIRECTIONAL:
                return prot | IOMMU_READ | IOMMU_WRITE;
        case DMA_TO_DEVICE:
                return prot | IOMMU_READ;
        case DMA_FROM_DEVICE:
                return prot | IOMMU_WRITE;
        default:
                return 0;
        }
}

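/*
 * Example (illustrative sketch only): an arch dma_map_ops .map_page
 * implementation built on this layer would combine the translated flags with
 * iommu_dma_map_page(), roughly like so ("coherent" stands in for however
 * the arch determines device coherency):
 *
 *      int prot = dma_info_to_prot(dir, coherent, attrs);
 *
 *      return iommu_dma_map_page(dev, page, offset, size, prot);
 */
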
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
                size_t size, dma_addr_t dma_limit, struct device *dev)
{
        struct iova_domain *iovad = cookie_iovad(domain);
        unsigned long shift = iova_shift(iovad);
        unsigned long iova_len = size >> shift;
        struct iova *iova = NULL;

        if (domain->geometry.force_aperture)
                dma_limit = min(dma_limit, domain->geometry.aperture_end);

        /* Try to get PCI devices a SAC address */
        if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
                iova = alloc_iova(iovad, iova_len, DMA_BIT_MASK(32) >> shift,
                                  true);
        /*
         * Enforce size-alignment to be safe - there could perhaps be an
         * attribute to control this per-device, or at least per-domain...
         */
        if (!iova)
                iova = alloc_iova(iovad, iova_len, dma_limit >> shift, true);

        /* Callers treat 0 as failure, so don't dereference a failed alloc */
        if (!iova)
                return 0;

        return (dma_addr_t)iova->pfn_lo << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
                dma_addr_t iova, size_t size)
{
        struct iova_domain *iovad = &cookie->iovad;
        struct iova *iova_rbnode;

        iova_rbnode = find_iova(iovad, iova_pfn(iovad, iova));
        if (WARN_ON(!iova_rbnode))
                return;

        __free_iova(iovad, iova_rbnode);
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
                size_t size)
{
        struct iova_domain *iovad = cookie_iovad(domain);
        size_t iova_off = iova_offset(iovad, dma_addr);

        dma_addr -= iova_off;
        size = iova_align(iovad, size + iova_off);

        WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
        iommu_dma_free_iova(domain->iova_cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
        while (count--)
                __free_page(pages[count]);
        kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
                unsigned long order_mask, gfp_t gfp)
{
        struct page **pages;
        unsigned int i = 0, array_size = count * sizeof(*pages);

        order_mask &= (2U << MAX_ORDER) - 1;
        if (!order_mask)
                return NULL;

        if (array_size <= PAGE_SIZE)
                pages = kzalloc(array_size, GFP_KERNEL);
        else
                pages = vzalloc(array_size);
        if (!pages)
                return NULL;

        /* IOMMU can map any pages, so himem can also be used here */
        gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

        while (count) {
                struct page *page = NULL;
                unsigned int order_size;

                /*
                 * Higher-order allocations are a convenience rather
                 * than a necessity, hence using __GFP_NORETRY until
                 * falling back to minimum-order allocations.
                 */
                for (order_mask &= (2U << __fls(count)) - 1;
                     order_mask; order_mask &= ~order_size) {
                        unsigned int order = __fls(order_mask);

                        order_size = 1U << order;
                        page = alloc_pages((order_mask - order_size) ?
                                           gfp | __GFP_NORETRY : gfp, order);
                        if (!page)
                                continue;
                        if (!order)
                                break;
                        if (!PageCompound(page)) {
                                split_page(page, order);
                                break;
                        } else if (!split_huge_page(page)) {
                                break;
                        }
                        __free_pages(page, order);
                }
                if (!page) {
                        __iommu_dma_free_pages(pages, i);
                        return NULL;
                }
                count -= order_size;
                while (order_size--)
                        pages[i++] = page++;
        }
        return pages;
}

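/*
 * Worked example (illustrative only): with a pgsize_bitmap of 4K | 2M, the
 * caller passes order_mask = (SZ_4K | SZ_2M) >> PAGE_SHIFT, i.e. orders 0
 * and 9 on a 4K-page kernel. For a 3MB buffer (count == 768):
 *
 *  - first iteration: order 9 is still within __fls(count), so one order-9
 *    (2MB) block is attempted with __GFP_NORETRY; if it succeeds it is split
 *    into 512 order-0 pages and count drops to 256,
 *  - second iteration: 256 < 512 masks order 9 out, so the remaining pages
 *    are allocated one at a time at order 0,
 *  - if the order-9 attempt fails, the inner loop clears that bit and falls
 *    back to order 0 within the same iteration.
 */
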
/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
                dma_addr_t *handle)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
        __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
        *handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *       attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *              given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *         or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
                unsigned long attrs, int prot, dma_addr_t *handle,
                void (*flush_page)(struct device *, const void *, phys_addr_t))
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct page **pages;
        struct sg_table sgt;
        dma_addr_t iova;
        unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

        *handle = DMA_ERROR_CODE;

        min_size = alloc_sizes & -alloc_sizes;
        if (min_size < PAGE_SIZE) {
                min_size = PAGE_SIZE;
                alloc_sizes |= PAGE_SIZE;
        } else {
                size = ALIGN(size, min_size);
        }
        if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
                alloc_sizes = min_size;

        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
        if (!pages)
                return NULL;

        size = iova_align(iovad, size);
        iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
        if (!iova)
                goto out_free_pages;

        if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
                goto out_free_iova;

        if (!(prot & IOMMU_CACHE)) {
                struct sg_mapping_iter miter;
                /*
                 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
                 * sufficient here, so skip it by using the "wrong" direction.
                 */
                sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
                while (sg_miter_next(&miter))
                        flush_page(dev, miter.addr, page_to_phys(miter.page));
                sg_miter_stop(&miter);
        }

        if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
                        < size)
                goto out_free_sg;

        *handle = iova;
        sg_free_table(&sgt);
        return pages;

out_free_sg:
        sg_free_table(&sgt);
out_free_iova:
        iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
        __iommu_dma_free_pages(pages, count);
        return NULL;
}

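/*
 * Example (illustrative sketch only): a non-coherent arch's .alloc hook
 * might drive this roughly as follows, with my_flush_page being whatever
 * arch routine makes a page visible to the device, and the resulting page
 * array then being remapped into a contiguous CPU mapping by the caller:
 *
 *      int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 *
 *      pages = iommu_dma_alloc(dev, size, gfp, attrs, ioprot,
 *                              handle, my_flush_page);
 *      if (!pages)
 *              return NULL;
 */
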
/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */

int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
        unsigned long uaddr = vma->vm_start;
        unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        int ret = -ENXIO;

        for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
                ret = vm_insert_page(vma, uaddr, pages[i]);
                if (ret)
                        break;
                uaddr += PAGE_SIZE;
        }
        return ret;
}

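/*
 * Example (illustrative sketch only): an arch .mmap implementation would
 * validate the VMA against the buffer before handing over, e.g. (area->pages
 * standing in for wherever the arch stashed the array from iommu_dma_alloc()):
 *
 *      if (vma->vm_end - vma->vm_start > PAGE_ALIGN(size))
 *              return -ENXIO;
 *      return iommu_dma_mmap(area->pages, size, vma);
 */
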
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
                size_t size, int prot)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        size_t iova_off = iova_offset(iovad, phys);
        dma_addr_t iova;

        size = iova_align(iovad, size + iova_off);
        iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
        if (!iova)
                return DMA_ERROR_CODE;

        if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
                iommu_dma_free_iova(cookie, iova, size);
                return DMA_ERROR_CODE;
        }
        return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, int prot)
{
        return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
                dma_addr_t dma_addr)
{
        struct scatterlist *s, *cur = sg;
        unsigned long seg_mask = dma_get_seg_boundary(dev);
        unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
        int i, count = 0;

        for_each_sg(sg, s, nents, i) {
                /* Restore this segment's original unaligned fields first */
                unsigned int s_iova_off = sg_dma_address(s);
                unsigned int s_length = sg_dma_len(s);
                unsigned int s_iova_len = s->length;

                s->offset += s_iova_off;
                s->length = s_length;
                sg_dma_address(s) = DMA_ERROR_CODE;
                sg_dma_len(s) = 0;

                /*
                 * Now fill in the real DMA data. If...
                 * - there is a valid output segment to append to
                 * - and this segment starts on an IOVA page boundary
                 * - but doesn't fall at a segment boundary
                 * - and wouldn't make the resulting output segment too long
                 */
                if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
                    (cur_len + s_length <= max_len)) {
                        /* ...then concatenate it with the previous one */
                        cur_len += s_length;
                } else {
                        /* Otherwise start the next output segment */
                        if (i > 0)
                                cur = sg_next(cur);
                        cur_len = s_length;
                        count++;

                        sg_dma_address(cur) = dma_addr + s_iova_off;
                }

                sg_dma_len(cur) = cur_len;
                dma_addr += s_iova_len;

                if (s_length + s_iova_off < s_iova_len)
                        cur_len = 0;
        }
        return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (sg_dma_address(s) != DMA_ERROR_CODE)
                        s->offset += sg_dma_address(s);
                if (sg_dma_len(s))
                        s->length = sg_dma_len(s);
                sg_dma_address(s) = DMA_ERROR_CODE;
                sg_dma_len(s) = 0;
        }
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
                int nents, int prot)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct scatterlist *s, *prev = NULL;
        dma_addr_t iova;
        size_t iova_len = 0;
        unsigned long mask = dma_get_seg_boundary(dev);
        int i;

        /*
         * Work out how much IOVA space we need, and align the segments to
         * IOVA granules for the IOMMU driver to handle. With some clever
         * trickery we can modify the list in-place, but reversibly, by
         * stashing the unaligned parts in the as-yet-unused DMA fields.
         */
        for_each_sg(sg, s, nents, i) {
                size_t s_iova_off = iova_offset(iovad, s->offset);
                size_t s_length = s->length;
                size_t pad_len = (mask - iova_len + 1) & mask;

                sg_dma_address(s) = s_iova_off;
                sg_dma_len(s) = s_length;
                s->offset -= s_iova_off;
                s_length = iova_align(iovad, s_length + s_iova_off);
                s->length = s_length;

                /*
                 * Due to the alignment of our single IOVA allocation, we can
                 * depend on these assumptions about the segment boundary mask:
                 * - If mask size >= IOVA size, then the IOVA range cannot
                 *   possibly fall across a boundary, so we don't care.
                 * - If mask size < IOVA size, then the IOVA range must start
                 *   exactly on a boundary, therefore we can lay things out
                 *   based purely on segment lengths without needing to know
                 *   the actual addresses beforehand.
                 * - The mask must be a power of 2, so pad_len == 0 if
                 *   iova_len == 0, thus we cannot dereference prev the first
                 *   time through here (i.e. before it has a meaningful value).
                 */
                if (pad_len && pad_len < s_length - 1) {
                        prev->length += pad_len;
                        iova_len += pad_len;
                }

                iova_len += s_length;
                prev = s;
        }

        iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
        if (!iova)
                goto out_restore_sg;

        /*
         * We'll leave any physical concatenation to the IOMMU driver's
         * implementation - it knows better than we do.
         */
        if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
                goto out_free_iova;

        return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
        iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
        __invalidate_sg(sg, nents);
        return 0;
}

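/*
 * Example (illustrative sketch only): an arch .map_sg callback layered on
 * this helper just translates the direction and attributes and passes the
 * list straight through ("coherent" again stands in for the arch's
 * coherency test):
 *
 *      return iommu_dma_map_sg(dev, sgl, nents,
 *                              dma_info_to_prot(dir, coherent, attrs));
 */
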
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        dma_addr_t start, end;
        struct scatterlist *tmp;
        int i;
        /*
         * The scatterlist segments are mapped into a single
         * contiguous IOVA allocation, so this is incredibly easy.
         */
        start = sg_dma_address(sg);
        for_each_sg(sg_next(sg), tmp, nents - 1, i) {
                if (sg_dma_len(tmp) == 0)
                        break;
                sg = tmp;
        }
        end = sg_dma_address(sg) + sg_dma_len(sg);
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        return __iommu_dma_map(dev, phys, size,
                        dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == DMA_ERROR_CODE;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
                phys_addr_t msi_addr, struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi_page;
        struct iova_domain *iovad = cookie_iovad(domain);
        dma_addr_t iova;
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
        size_t size = cookie_msi_granule(cookie);

        msi_addr &= ~(phys_addr_t)(size - 1);
        list_for_each_entry(msi_page, &cookie->msi_page_list, list)
                if (msi_page->phys == msi_addr)
                        return msi_page;

        msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
        if (!msi_page)
                return NULL;

        msi_page->phys = msi_addr;
        if (iovad) {
                iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
                if (!iova)
                        goto out_free_page;
                msi_page->iova = iova;
        } else {
                msi_page->iova = cookie->msi_iova;
                cookie->msi_iova += size;
        }

        if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
                goto out_free_iova;

        INIT_LIST_HEAD(&msi_page->list);
        list_add(&msi_page->list, &cookie->msi_page_list);
        return msi_page;

out_free_iova:
        if (iovad)
                iommu_dma_free_iova(cookie, iova, size);
        else
                cookie->msi_iova -= size;
out_free_page:
        kfree(msi_page);
        return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
        struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_cookie *cookie;
        struct iommu_dma_msi_page *msi_page;
        phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
        unsigned long flags;

        if (!domain || !domain->iova_cookie)
                return;

        cookie = domain->iova_cookie;

        /*
         * We disable IRQs to rule out a possible inversion against
         * irq_desc_lock if, say, someone tries to retarget the affinity
         * of an MSI from within an IPI handler.
         */
        spin_lock_irqsave(&cookie->msi_lock, flags);
        msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
        spin_unlock_irqrestore(&cookie->msi_lock, flags);

        if (WARN_ON(!msi_page)) {
                /*
                 * We're called from a void callback, so the best we can do is
                 * 'fail' by filling the message with obviously bogus values.
                 * Since we got this far due to an IOMMU being present, it's
                 * not like the existing address would have worked anyway...
                 */
                msg->address_hi = ~0U;
                msg->address_lo = ~0U;
                msg->data = ~0U;
        } else {
                msg->address_hi = upper_32_bits(msi_page->iova);
                msg->address_lo &= cookie_msi_granule(cookie) - 1;
                msg->address_lo += lower_32_bits(msi_page->iova);
        }
}
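
/*
 * Example (illustrative sketch only): an MSI irqchip's irq_compose_msi_msg
 * hook would fill in the physical doorbell address and then let this helper
 * rewrite it with the IOVA the device must actually use, e.g.
 * (my_compose_msi_msg, my_doorbell_phys and my_hwirq are hypothetical):
 *
 *      static void my_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
 *      {
 *              msg->address_lo = lower_32_bits(my_doorbell_phys);
 *              msg->address_hi = upper_32_bits(my_doorbell_phys);
 *              msg->data = my_hwirq(d);
 *              iommu_dma_map_msi_msg(d->irq, msg);
 *      }
 */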