/*
 *  Copyright 2010
 *  by Konrad Rzeszutek Wilk <[email protected]>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa and also providing a
 * mechanism to have contiguous pages for device driver operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore
 * memory is not contiguous. The Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 *
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

#define XEN_SWIOTLB_ERROR_CODE (~(dma_addr_t)0x0)

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static u64 start_dma_addr;

/*
 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 * can be 32bit when dma_addr_t is 64bit leading to a loss in
 * information if the shift is done before casting to 64bit.
 */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
        unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
        dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

        dma |= paddr & ~XEN_PAGE_MASK;

        return dma;
}

static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
        unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
        dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
        phys_addr_t paddr = dma;

        paddr |= baddr & ~XEN_PAGE_MASK;

        return paddr;
}

static inline dma_addr_t xen_virt_to_bus(void *address)
{
        return xen_phys_to_bus(virt_to_phys(address));
}

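/*
 * Check whether the machine frames backing [offset, offset + length) of the
 * buffer starting at Xen page xen_pfn are contiguous: the bfn of each
 * successive Xen page must be exactly one greater than the previous one.
 * Returns 1 if contiguous, 0 otherwise.
 */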
static int check_pages_physically_contiguous(unsigned long xen_pfn,
                                             unsigned int offset,
                                             size_t length)
{
        unsigned long next_bfn;
        int i;
        int nr_pages;

        next_bfn = pfn_to_bfn(xen_pfn);
        nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT;

        for (i = 1; i < nr_pages; i++) {
                if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
                        return 0;
        }
        return 1;
}

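/*
 * Return 1 if the buffer [p, p + size) crosses a Xen page boundary and the
 * machine frames behind it are not contiguous, i.e. it cannot be handed to a
 * device as one DMA range; return 0 otherwise.
 */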
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
        unsigned long xen_pfn = XEN_PFN_DOWN(p);
        unsigned int offset = p & ~XEN_PAGE_MASK;

        if (offset + size <= XEN_PAGE_SIZE)
                return 0;
        if (check_pages_physically_contiguous(xen_pfn, offset, size))
                return 0;
        return 1;
}

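/*
 * Return 1 if the bus address falls within this domain's swiotlb-xen bounce
 * buffer, 0 otherwise (including for addresses belonging to other domains).
 */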
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
        unsigned long bfn = XEN_PFN_DOWN(dma_addr);
        unsigned long xen_pfn = bfn_to_local_pfn(bfn);
        phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);

        /* If the address is outside our domain, it CAN
         * have the same virtual address as another address
         * in our domain. Therefore _only_ check addresses within our domain.
         */
        if (pfn_valid(PFN_DOWN(paddr))) {
                return paddr >= virt_to_phys(xen_io_tlb_start) &&
                       paddr < virt_to_phys(xen_io_tlb_end);
        }
        return 0;
}

static int max_dma_bits = 32;

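/*
 * Exchange the pages backing the bounce buffer, one IO_TLB_SEGSIZE chunk at a
 * time, for machine-contiguous pages addressable within dma_bits, widening
 * the limit up to max_dma_bits if the hypervisor cannot satisfy the request.
 */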
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
        int i, rc;
        int dma_bits;
        dma_addr_t dma_handle;
        phys_addr_t p = virt_to_phys(buf);

        dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

        i = 0;
        do {
                int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

                do {
                        rc = xen_create_contiguous_region(
                                p + (i << IO_TLB_SHIFT),
                                get_order(slabs << IO_TLB_SHIFT),
                                dma_bits, &dma_handle);
                } while (rc && dma_bits++ < max_dma_bits);
                if (rc)
                        return rc;

                i += slabs;
        } while (i < nslabs);
        return 0;
}
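/*
 * Pick the number of IO TLB slabs: use the caller's value if non-zero,
 * otherwise default to 64MB worth of slabs. Returns the buffer size in bytes.
 */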
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
        if (!nr_tbl) {
                xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
                xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
        } else
                xen_io_tlb_nslabs = nr_tbl;

        return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
        XEN_SWIOTLB_UNKNOWN = 0,
        XEN_SWIOTLB_ENOMEM,
        XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
        switch (err) {
        case XEN_SWIOTLB_ENOMEM:
                return "Cannot allocate Xen-SWIOTLB buffer\n";
        case XEN_SWIOTLB_EFIXUP:
                return "Failed to get contiguous memory for DMA from Xen!\n"\
                    "You either: don't have the permissions, do not have"\
                    " enough free memory under 4GB, or the hypervisor memory"\
                    " is too fragmented!";
        default:
                break;
        }
        return "";
}
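/*
 * Allocate the swiotlb-xen bounce buffer (from memblock when "early", from
 * the page allocator otherwise), exchange it for machine-contiguous memory
 * under 4GB via xen_swiotlb_fixup(), and hand it to the core swiotlb code.
 * On failure the buffer size is halved and the allocation retried a few times.
 */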
int __ref xen_swiotlb_init(int verbose, bool early)
{
        unsigned long bytes, order;
        int rc = -ENOMEM;
        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned int repeat = 3;

        xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
        bytes = xen_set_nslabs(xen_io_tlb_nslabs);
        order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
        /*
         * Get IO TLB memory from any location.
         */
        if (early)
                xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
                                                  PAGE_SIZE);
        else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
                while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                        xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
                        if (xen_io_tlb_start)
                                break;
                        order--;
                }
                if (order != get_order(bytes)) {
                        pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
                                (PAGE_SIZE << order) >> 20);
                        xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
                        bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
                }
        }
        if (!xen_io_tlb_start) {
                m_ret = XEN_SWIOTLB_ENOMEM;
                goto error;
        }
        xen_io_tlb_end = xen_io_tlb_start + bytes;
        /*
         * And replace that memory with pages under 4GB.
         */
        rc = xen_swiotlb_fixup(xen_io_tlb_start,
                               bytes,
                               xen_io_tlb_nslabs);
        if (rc) {
                if (early)
                        memblock_free(__pa(xen_io_tlb_start),
                                      PAGE_ALIGN(bytes));
                else {
                        free_pages((unsigned long)xen_io_tlb_start, order);
                        xen_io_tlb_start = NULL;
                }
                m_ret = XEN_SWIOTLB_EFIXUP;
                goto error;
        }
        start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
        if (early) {
                if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
                                          verbose))
                        panic("Cannot allocate SWIOTLB buffer");
                rc = 0;
        } else
                rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

        if (!rc)
                swiotlb_set_max_segment(PAGE_SIZE);

        return rc;
error:
        if (repeat--) {
                xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
                                        (xen_io_tlb_nslabs >> 1));
                pr_info("Lowering to %luMB\n",
                        (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
                goto retry;
        }
        pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
        if (early)
                panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
        else
                free_pages((unsigned long)xen_io_tlb_start, order);
        return rc;
}

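/*
 * Allocate coherent memory for a device: get pages from the arch helper, then
 * either use them directly (if already DMA-addressable and machine-contiguous)
 * or ask Xen to exchange them for a contiguous region under the coherent mask.
 */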
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           unsigned long attrs)
{
        void *ret;
        int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);
        phys_addr_t phys;
        dma_addr_t dev_addr;

        /*
         * Ignore region specifiers - the kernel's idea of the
         * pseudo-phys memory layout has nothing to do with the
         * machine physical layout. We can't allocate highmem
         * because we can't return a pointer to it.
         */
        flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

        /* Convert the size to actually allocated. */
        size = 1UL << (order + XEN_PAGE_SHIFT);

        /* On ARM this function returns an ioremap'ped virtual address for
         * which virt_to_phys doesn't return the corresponding physical
         * address. In fact on ARM virt_to_phys only works for kernel direct
         * mapped RAM memory. Also see comment below.
         */
        ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

        if (!ret)
                return ret;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        /* At this point dma_handle is the physical address, next we are
         * going to set it to the machine address.
         * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
         * to *dma_handle. */
        phys = *dma_handle;
        dev_addr = xen_phys_to_bus(phys);
        if (((dev_addr + size - 1 <= dma_mask)) &&
            !range_straddles_page_boundary(phys, size))
                *dma_handle = dev_addr;
        else {
                if (xen_create_contiguous_region(phys, order,
                                                 fls64(dma_mask), dma_handle) != 0) {
                        xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
                        return NULL;
                }
        }
        memset(ret, 0, size);
        return ret;
}

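/*
 * Free memory obtained from xen_swiotlb_alloc_coherent(), undoing the
 * contiguous-region exchange if one was made at allocation time.
 */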
static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                          dma_addr_t dev_addr, unsigned long attrs)
{
        int order = get_order(size);
        phys_addr_t phys;
        u64 dma_mask = DMA_BIT_MASK(32);

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        /* do not use virt_to_phys because on ARM it doesn't return
         * the physical address */
        phys = xen_bus_to_phys(dev_addr);

        /* Convert the size to actually allocated. */
        size = 1UL << (order + XEN_PAGE_SHIFT);

        if (((dev_addr + size - 1 <= dma_mask)) ||
            range_straddles_page_boundary(phys, size))
                xen_destroy_contiguous_region(phys, order);

        xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * dma address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                       unsigned long offset, size_t size,
                                       enum dma_data_direction dir,
                                       unsigned long attrs)
{
        phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = xen_phys_to_bus(phys);

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size) &&
            !range_straddles_page_boundary(phys, size) &&
            !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
            (swiotlb_force != SWIOTLB_FORCE)) {
                /* we are not interested in the dma_addr returned by
                 * xen_dma_map_page, only in the potential cache flushes executed
                 * by the function. */
                xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
                return dev_addr;
        }

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

        map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
                                     attrs);
        if (map == SWIOTLB_MAP_ERROR)
                return XEN_SWIOTLB_ERROR_CODE;

        dev_addr = xen_phys_to_bus(map);
        xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
                         dev_addr, map & ~PAGE_MASK, size, dir, attrs);

        /*
         * Ensure that the address returned is DMA'ble
         */
        if (dma_capable(dev, dev_addr, size))
                return dev_addr;

        attrs |= DMA_ATTR_SKIP_CPU_SYNC;
        swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

        return XEN_SWIOTLB_ERROR_CODE;
}

/*
 * Unmap a single streaming mode DMA translation. The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir,
                             unsigned long attrs)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        /*
         * phys_to_virt doesn't work with a highmem page, but we could
         * call dma_mark_clean() with a highmem page here. However, we
         * are fine since dma_mark_clean() is null on POWERPC. We can
         * make dma_mark_clean() take a physical address if necessary.
         */
        dma_mark_clean(phys_to_virt(paddr), size);
}

static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                                   size_t size, enum dma_data_direction dir,
                                   unsigned long attrs)
{
        xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so. At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_sync_single_for_device, and then the device again owns the buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                        size_t size, enum dma_data_direction dir,
                        enum dma_sync_target target)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        if (target == SYNC_FOR_CPU)
                xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr))
                swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

        if (target == SYNC_FOR_DEVICE)
                xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);

        if (dir != DMA_FROM_DEVICE)
                return;

        dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                   size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}

/*
 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                           int nelems, enum dma_data_direction dir,
                           unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);

}

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface. Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
static int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                         int nelems, enum dma_data_direction dir,
                         unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                phys_addr_t paddr = sg_phys(sg);
                dma_addr_t dev_addr = xen_phys_to_bus(paddr);

                if (swiotlb_force == SWIOTLB_FORCE ||
                    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
                    !dma_capable(hwdev, dev_addr, sg->length) ||
                    range_straddles_page_boundary(paddr, sg->length)) {
                        phys_addr_t map = swiotlb_tbl_map_single(hwdev,
                                                                 start_dma_addr,
                                                                 sg_phys(sg),
                                                                 sg->length,
                                                                 dir, attrs);
                        if (map == SWIOTLB_MAP_ERROR) {
                                dev_warn(hwdev, "swiotlb buffer is full\n");
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                attrs |= DMA_ATTR_SKIP_CPU_SYNC;
                                xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                           attrs);
                                sg_dma_len(sgl) = 0;
                                return 0;
                        }
                        dev_addr = xen_phys_to_bus(map);
                        xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
                                         dev_addr,
                                         map & ~PAGE_MASK,
                                         sg->length,
                                         dir,
                                         attrs);
                        sg->dma_address = dev_addr;
                } else {
                        /* we are not interested in the dma_addr returned by
                         * xen_dma_map_page, only in the potential cache flushes executed
                         * by the function. */
                        xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
                                         dev_addr,
                                         paddr & ~PAGE_MASK,
                                         sg->length,
                                         dir,
                                         attrs);
                        sg->dma_address = dev_addr;
                }
                sg_dma_len(sg) = sg->length;
        }
        return nelems;
}

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
                    int nelems, enum dma_data_direction dir,
                    enum dma_sync_target target)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                xen_swiotlb_sync_single(hwdev, sg->dma_address,
                                        sg_dma_len(sg), dir, target);
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                            int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                               int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 * This function should be called with the pages from the current domain only;
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                     void *cpu_addr, dma_addr_t dma_addr, size_t size,
                     unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
        if (xen_get_dma_ops(dev)->mmap)
                return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
                                                  dma_addr, size, attrs);
#endif
        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

/*
 * This function should be called with the pages from the current domain only;
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
                        void *cpu_addr, dma_addr_t handle, size_t size,
                        unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
        if (xen_get_dma_ops(dev)->get_sgtable) {
#if 0
        /*
         * This check verifies that the page belongs to the current domain and
         * is not one mapped from another domain.
         * This check is for debug only, and should not be enabled in a
         * production build.
         */
        unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
        BUG_ON(!page_is_ram(bfn));
#endif
                return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
                                                         handle, size, attrs);
        }
#endif
        return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
}

static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == XEN_SWIOTLB_ERROR_CODE;
}

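/*
 * The DMA operations exported to the rest of the kernel. The callbacks above
 * translate between pseudo-physical and machine addresses and fall back to
 * the bounce buffer (or to a Xen contiguous-region exchange) when a buffer
 * cannot be handed to the device directly.
 */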
const struct dma_map_ops xen_swiotlb_dma_ops = {
        .alloc = xen_swiotlb_alloc_coherent,
        .free = xen_swiotlb_free_coherent,
        .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
        .sync_single_for_device = xen_swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
        .map_sg = xen_swiotlb_map_sg_attrs,
        .unmap_sg = xen_swiotlb_unmap_sg_attrs,
        .map_page = xen_swiotlb_map_page,
        .unmap_page = xen_swiotlb_unmap_page,
        .dma_supported = xen_swiotlb_dma_supported,
        .mmap = xen_swiotlb_dma_mmap,
        .get_sgtable = xen_swiotlb_get_sgtable,
        .mapping_error = xen_swiotlb_mapping_error,
};