// SPDX-License-Identifier: GPL-2.0-only
/*
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen run on a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa and also providing a
 * mechanism to have contiguous pages for device driver operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */

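/*
 * For example (hypothetical frame numbers), a buffer spanning two
 * consecutive guest PFNs can be split across non-adjacent machine frames:
 *
 *	pfn_to_bfn(pfn)     == 0x80000
 *	pfn_to_bfn(pfn + 1) == 0x12345	(not 0x80001!)
 *
 * range_straddles_page_boundary() below detects exactly this case, so such
 * buffers can be bounced or exchanged for machine-contiguous memory.
 */
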
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>

#define MAX_DMA_BITS 32

/*
 * Quick lookup value of the bus address of the IOTLB.
 */
static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;

	baddr |= paddr & ~XEN_PAGE_MASK;
	return baddr;
}

static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}

static inline phys_addr_t xen_bus_to_phys(struct device *dev,
					  phys_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
			    (baddr & ~XEN_PAGE_MASK);

	return paddr;
}

static inline phys_addr_t xen_dma_to_phys(struct device *dev,
					  dma_addr_t dma_addr)
{
	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}

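/*
 * Return non-zero if the buffer [p, p + size) is not machine-contiguous,
 * i.e. if any two adjacent Xen pages backing it map to non-consecutive
 * frame numbers.
 */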
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

	next_bfn = pfn_to_bfn(xen_pfn);
	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;
	return 0;
}

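/*
 * Check whether a DMA address points into the SWIOTLB bounce buffer owned
 * by this domain.
 */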
static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr)))
		return is_swiotlb_buffer(paddr);
	return 0;
}

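/*
 * Exchange the pages backing the bounce buffer with Xen, one IO_TLB_SEGSIZE
 * segment at a time, for machine-contiguous memory addressable within
 * dma_bits, widening the mask up to MAX_DMA_BITS until Xen succeeds.
 */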
static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < MAX_DMA_BITS);
		if (rc)
			return rc;
		i += slabs;
	} while (i < nslabs);
	return 0;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP,
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"
		       "You either: don't have the permissions, do not have"
		       " enough free memory under 4GB, or the hypervisor memory"
		       " is too fragmented!";
	default:
		break;
	}
	return "";
}

#define DEFAULT_NSLABS		ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)

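/*
 * Late (post-boot) initialization: allocate the bounce buffer from free
 * pages, exchange it for memory Xen can DMA into, then hand it to the
 * core SWIOTLB code; shrink and retry if the hypervisor cannot satisfy
 * the request.
 */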
int __ref xen_swiotlb_init(void)
{
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned long bytes = swiotlb_size_or_default();
	unsigned long nslabs = bytes >> IO_TLB_SHIFT;
	unsigned int order, repeat = 3;
	int rc = -ENOMEM;
	char *start;

retry:
	m_ret = XEN_SWIOTLB_ENOMEM;
	order = get_order(bytes);

	/*
	 * Get IO TLB memory from any location.
	 */
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		start = (void *)xen_get_swiotlb_free_pages(order);
		if (start)
			break;
		order--;
	}
	if (!start)
		goto error;
	if (order != get_order(bytes)) {
		pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
			(PAGE_SIZE << order) >> 20);
		nslabs = SLABS_PER_PAGE << order;
		bytes = nslabs << IO_TLB_SHIFT;
	}

	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(start, nslabs);
	if (rc) {
		free_pages((unsigned long)start, order);
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	rc = swiotlb_late_init_with_tbl(start, nslabs);
	if (rc)
		return rc;
	swiotlb_set_max_segment(PAGE_SIZE);
	return 0;
error:
	if (repeat--) {
		/* Min is 2MB */
		nslabs = max(1024UL, (nslabs >> 1));
		bytes = nslabs << IO_TLB_SHIFT;
		pr_info("Lowering to %luMB\n", bytes >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	free_pages((unsigned long)start, order);
	return rc;
}

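/*
 * Early (boot-time) initialization: same fixup dance as above, but the
 * buffer comes from memblock and any failure is fatal.
 */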
#ifdef CONFIG_X86
void __init xen_swiotlb_init_early(void)
{
	unsigned long bytes = swiotlb_size_or_default();
	unsigned long nslabs = bytes >> IO_TLB_SHIFT;
	unsigned int repeat = 3;
	char *start;
	int rc;

retry:
	/*
	 * Get IO TLB memory from any location.
	 */
	start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
	if (!start)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_ALIGN(bytes), PAGE_SIZE);

	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(start, nslabs);
	if (rc) {
		memblock_free(__pa(start), PAGE_ALIGN(bytes));
		if (repeat--) {
			/* Min is 2MB */
			nslabs = max(1024UL, (nslabs >> 1));
			bytes = nslabs << IO_TLB_SHIFT;
			pr_info("Lowering to %luMB\n", bytes >> 20);
			goto retry;
		}
		panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
	}

	if (swiotlb_init_with_tbl(start, nslabs, false))
		panic("Cannot allocate SWIOTLB buffer");
	swiotlb_set_max_segment(PAGE_SIZE);
}
#endif /* CONFIG_X86 */

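/*
 * Allocate a coherent buffer and, unless it already happens to be
 * machine-contiguous and within the device's coherent DMA mask, exchange
 * it with Xen for a contiguous region before returning the bus address.
 */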
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's ideas of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* At this point dma_handle is the dma address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = dma_to_phys(hwdev, *dma_handle);
	dev_addr = xen_phys_to_dma(hwdev, phys);
	if ((dev_addr + size - 1 <= dma_mask) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret,
						(dma_addr_t)phys, attrs);
			return NULL;
		}
		*dma_handle = phys_to_dma(hwdev, *dma_handle);
		SetPageXenRemapped(virt_to_page(ret));
	}

	memset(ret, 0, size);
	return ret;
}

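/*
 * Counterpart to xen_swiotlb_alloc_coherent: tear down the contiguous
 * region if one was created, then free the pages.
 */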
static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);
	struct page *page;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return you the
	 * physical address */
	phys = xen_dma_to_phys(hwdev, dev_addr);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (is_vmalloc_addr(vaddr))
		page = vmalloc_to_page(vaddr);
	else
		page = virt_to_page(vaddr);

	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
		     range_straddles_page_boundary(phys, size)) &&
	    TestClearPageXenRemapped(page))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys),
				attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size, true) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    swiotlb_force != SWIOTLB_FORCE)
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, phys, size, size, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	phys = map;
	dev_addr = xen_phys_to_dma(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, map, size, dir,
				attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

done:
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
			arch_sync_dma_for_device(phys, size, dir);
		else
			xen_dma_sync_for_device(dev, dev_addr, size, dir);
	}
	return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
	}

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(hwdev, dev_addr))
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
}

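/*
 * Sync a mapping back to the CPU: perform any architecture cache
 * maintenance first, then copy bounce buffer contents back to the
 * original page if one is in use.
 */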
static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
	}

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
}

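/*
 * Sync a mapping toward the device: copy into the bounce buffer first,
 * then perform cache maintenance; note the mirrored order relative to
 * the for_cpu variant above.
 */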
static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_sync_single_for_device(dev, paddr, size, dir);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_device(paddr, size, dir);
		else
			xen_dma_sync_for_device(dev, dma_addr, size, dir);
	}
}

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);
	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
				dir, attrs);
}

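/*
 * Map each scatterlist entry independently; on failure, unwind whatever
 * was mapped so far and signal the error with sg_dma_len(sgl) set to 0.
 */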
static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);
	for_each_sg(sgl, sg, nelems, i) {
		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}
	return nelems;
out_unmap:
	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sgl) = 0;
	return 0;
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
			    int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
				sg->length, dir);
	}
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
			       int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
				sg->length, dir);
	}
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_phys_to_dma(hwdev, io_tlb_default_mem->end - 1) <= mask;
}

const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg,
	.unmap_sg = xen_swiotlb_unmap_sg,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.alloc_pages = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
};
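
/*
 * Illustrative usage sketch (an assumption, not taken from this file):
 * architecture setup code points devices at these ops when Xen bounce
 * buffering is required, roughly along the lines of:
 *
 *	if (xen_pv_domain()) {
 *		xen_swiotlb_init_early();
 *		dma_ops = &xen_swiotlb_dma_ops;
 *	}
 */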