Merge tag 'dma-mapping-5.12' of git://git.infradead.org/users/hch/dma-mapping
author Linus Torvalds <[email protected]>
	Wed, 24 Feb 2021 17:54:24 +0000 (09:54 -0800)
committer Linus Torvalds <[email protected]>
	Wed, 24 Feb 2021 17:54:24 +0000 (09:54 -0800)
Pull dma-mapping updates from Christoph Hellwig:

 - add support for emulating processing delays in the DMA API benchmark
   selftest (Barry Song); a sketch of the idea follows this list

 - remove support for non-contiguous noncoherent allocations, which
   aren't used and will be replaced by a different API (a sketch of the
   surviving wrapper entry points follows the shortlog)
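
The benchmark change amounts to holding each mapping for a configurable
time before tearing it down, so the IOVA space stays occupied for about
as long as it would during a real transfer. A minimal sketch of the idea,
not the exact selftest source; map_hold_unmap() is a hypothetical name,
and dev, buf, size and dma_trans_ns are assumed to come from the
benchmark's per-thread context:

#include <linux/delay.h>
#include <linux/dma-mapping.h>

static int map_hold_unmap(struct device *dev, void *buf, size_t size,
                u32 dma_trans_ns)
{
        dma_addr_t dma_addr;

        dma_addr = dma_map_single(dev, buf, size, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, dma_addr))
                return -ENOMEM;

        /*
         * Pretend DMA is transmitting: keep the mapping alive for
         * roughly as long as a real transfer would take.
         */
        ndelay(dma_trans_ns);

        dma_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
        return 0;
}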

* tag 'dma-mapping-5.12' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: remove the {alloc,free}_noncoherent methods
  dma-mapping: benchmark: pretend DMA is transmitting
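
With the {alloc,free}_noncoherent methods gone, dma_alloc_noncoherent()
and dma_free_noncoherent() remain available to drivers as thin wrappers
over dma_alloc_pages()/dma_free_pages(). Roughly, as a sketch (the
authoritative inlines live in include/linux/dma-mapping.h, which is
outside this combined diff):

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        struct page *page;

        /* fall through to the common page allocator unconditionally */
        page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
        return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
                void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
        dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}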

drivers/iommu/dma-iommu.c
include/linux/dma-map-ops.h
kernel/dma/mapping.c

diff --combined drivers/iommu/dma-iommu.c
index f659395e795971dc2d3fc9470f7f0ff9b301e129,255533faf90599f98212b09e785d7e9514c5ac3a..9ab6ee22c11088bb1fe76c11a5b3e456f73a5e35
@@@ -51,8 -51,6 +51,8 @@@ struct iommu_dma_cookie 
        struct iommu_domain             *fq_domain;
  };
  
 +static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
 +
  void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
                struct iommu_domain *domain)
  {
@@@ -380,6 -378,21 +380,6 @@@ static int iommu_dma_init_domain(struc
        return iova_reserve_iommu_regions(dev, domain);
  }
  
 -static int iommu_dma_deferred_attach(struct device *dev,
 -              struct iommu_domain *domain)
 -{
 -      const struct iommu_ops *ops = domain->ops;
 -
 -      if (!is_kdump_kernel())
 -              return 0;
 -
 -      if (unlikely(ops->is_attach_deferred &&
 -                      ops->is_attach_deferred(domain, dev)))
 -              return iommu_attach_device(domain, dev);
 -
 -      return 0;
 -}
 -
  /**
   * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
   *                    page flags.
@@@ -522,8 -535,7 +522,8 @@@ static dma_addr_t __iommu_dma_map(struc
        size_t iova_off = iova_offset(iovad, phys);
        dma_addr_t iova;
  
 -      if (unlikely(iommu_dma_deferred_attach(dev, domain)))
 +      if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
 +          iommu_deferred_attach(dev, domain))
                return DMA_MAPPING_ERROR;
  
        size = iova_align(iovad, size + iova_off);
@@@ -681,8 -693,7 +681,8 @@@ static void *iommu_dma_alloc_remap(stru
  
        *dma_handle = DMA_MAPPING_ERROR;
  
 -      if (unlikely(iommu_dma_deferred_attach(dev, domain)))
 +      if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
 +          iommu_deferred_attach(dev, domain))
                return NULL;
  
        min_size = alloc_sizes & -alloc_sizes;
@@@ -965,8 -976,7 +965,8 @@@ static int iommu_dma_map_sg(struct devi
        unsigned long mask = dma_get_seg_boundary(dev);
        int i;
  
 -      if (unlikely(iommu_dma_deferred_attach(dev, domain)))
 +      if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
 +          iommu_deferred_attach(dev, domain))
                return 0;
  
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
@@@ -1187,34 -1197,6 +1187,6 @@@ static void *iommu_dma_alloc(struct dev
        return cpu_addr;
  }
  
- #ifdef CONFIG_DMA_REMAP
- static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
-               dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
- {
-       if (!gfpflags_allow_blocking(gfp)) {
-               struct page *page;
-               page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
-               if (!page)
-                       return NULL;
-               return page_address(page);
-       }
-       return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
-                                    PAGE_KERNEL, 0);
- }
- static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
-               void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
- {
-       __iommu_dma_unmap(dev, handle, size);
-       __iommu_dma_free(dev, size, cpu_addr);
- }
- #else
- #define iommu_dma_alloc_noncoherent           NULL
- #define iommu_dma_free_noncoherent            NULL
- #endif /* CONFIG_DMA_REMAP */
  static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
@@@ -1285,8 -1267,6 +1257,6 @@@ static const struct dma_map_ops iommu_d
        .free                   = iommu_dma_free,
        .alloc_pages            = dma_common_alloc_pages,
        .free_pages             = dma_common_free_pages,
-       .alloc_noncoherent      = iommu_dma_alloc_noncoherent,
-       .free_noncoherent       = iommu_dma_free_noncoherent,
        .mmap                   = iommu_dma_mmap,
        .get_sgtable            = iommu_dma_get_sgtable,
        .map_page               = iommu_dma_map_page,
@@@ -1414,9 -1394,6 +1384,9 @@@ void iommu_dma_compose_msi_msg(struct m
  
  static int iommu_dma_init(void)
  {
 +      if (is_kdump_kernel())
 +              static_branch_enable(&iommu_deferred_attach_enabled);
 +
        return iova_cache_get();
  }
  arch_initcall(iommu_dma_init);
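
The dma-iommu.c side of this diff replaces a per-call is_kdump_kernel()
test with a static key that is flipped once in iommu_dma_init() above.
A minimal self-contained sketch of that pattern, with feature_enabled,
feature_init(), hot_path_work() and do_deferred_work() as hypothetical
stand-ins for the names in the diff:

#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(feature_enabled);

static void do_deferred_work(struct device *dev); /* hypothetical slow path */

static int __init feature_init(void)
{
        /* Decide once at boot rather than testing on every mapping. */
        if (is_kdump_kernel())
                static_branch_enable(&feature_enabled);
        return 0;
}
arch_initcall(feature_init);

void hot_path_work(struct device *dev)
{
        /*
         * With the key disabled this compiles down to a patched NOP,
         * so the common case pays nothing on the fast path; enabling
         * the key patches in a jump to the unlikely branch.
         */
        if (static_branch_unlikely(&feature_enabled))
                do_deferred_work(dev);
}
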
diff --combined include/linux/dma-map-ops.h
index 1e98b8c1e055a946b7e9b018fd158e8d361a7d7f,11e02537b9e01b610f97e9c502657fecdaa01d59..51872e736e7b1dfb2207a6f63e04f84f0b6be766
@@@ -22,11 -22,6 +22,6 @@@ struct dma_map_ops 
                        gfp_t gfp);
        void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
                        dma_addr_t dma_handle, enum dma_data_direction dir);
-       void *(*alloc_noncoherent)(struct device *dev, size_t size,
-                       dma_addr_t *dma_handle, enum dma_data_direction dir,
-                       gfp_t gfp);
-       void (*free_noncoherent)(struct device *dev, size_t size, void *vaddr,
-                       dma_addr_t dma_handle, enum dma_data_direction dir);
        int (*mmap)(struct device *, struct vm_area_struct *,
                        void *, dma_addr_t, size_t, unsigned long attrs);
  
@@@ -229,10 -224,11 +224,10 @@@ bool dma_free_from_pool(struct device *
  int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
                dma_addr_t dma_start, u64 size);
  
 -#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
 -#include <asm/dma-coherence.h>
 -#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
 +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
        defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
        defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
 +extern bool dma_default_coherent;
  static inline bool dev_is_dma_coherent(struct device *dev)
  {
        return dev->dma_coherent;
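
dev_is_dma_coherent() is what mapping code consults to decide whether
explicit cache maintenance is needed, and the newly added
dma_default_coherent (defined in kernel/dma/mapping.c below) is intended
as a boot-time platform default. A simplified consumer-side sketch
modeled on the dma-direct pattern; sketch_sync_for_device() is a
hypothetical name, not kernel code, and arch_sync_dma_for_device() only
exists under CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE:

#include <linux/dma-map-ops.h>

static void sketch_sync_for_device(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        /*
         * Coherent devices need no cache maintenance before the
         * device accesses the buffer; noncoherent ones get an
         * explicit writeback/invalidate via the arch hook.
         */
        if (!dev_is_dma_coherent(dev))
                arch_sync_dma_for_device(paddr, size, dir);
}
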
diff --combined kernel/dma/mapping.c
index 84de6b1c5fab4917869c8c865ea42c301dae93ff,68992e35c8c3a7010c283e0aaf02dcc84b2c445d..b6a63367993328671fa8ee6faab1d98ae7d852c5
@@@ -16,8 -16,6 +16,8 @@@
  #include "debug.h"
  #include "direct.h"
  
 +bool dma_default_coherent;
 +
  /*
   * Managed DMA API
   */
@@@ -517,46 -515,6 +517,6 @@@ void dma_free_pages(struct device *dev
  }
  EXPORT_SYMBOL_GPL(dma_free_pages);
  
- void *dma_alloc_noncoherent(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
- {
-       const struct dma_map_ops *ops = get_dma_ops(dev);
-       void *vaddr;
-       if (!ops || !ops->alloc_noncoherent) {
-               struct page *page;
-               page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
-               if (!page)
-                       return NULL;
-               return page_address(page);
-       }
-       size = PAGE_ALIGN(size);
-       vaddr = ops->alloc_noncoherent(dev, size, dma_handle, dir, gfp);
-       if (vaddr)
-               debug_dma_map_page(dev, virt_to_page(vaddr), 0, size, dir,
-                                  *dma_handle);
-       return vaddr;
- }
- EXPORT_SYMBOL_GPL(dma_alloc_noncoherent);
- void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, enum dma_data_direction dir)
- {
-       const struct dma_map_ops *ops = get_dma_ops(dev);
-       if (!ops || !ops->free_noncoherent) {
-               dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
-               return;
-       }
-       size = PAGE_ALIGN(size);
-       debug_dma_unmap_page(dev, dma_handle, size, dir);
-       ops->free_noncoherent(dev, size, vaddr, dma_handle, dir);
- }
- EXPORT_SYMBOL_GPL(dma_free_noncoherent);
  int dma_supported(struct device *dev, u64 mask)
  {
        const struct dma_map_ops *ops = get_dma_ops(dev);