Git Repo - J-linux.git/commitdiff
Merge branches 'apple/dart', 'arm/mediatek', 'arm/renesas', 'arm/smmu', 'arm/tegra...
author Joerg Roedel <[email protected]>
Sun, 31 Oct 2021 21:26:53 +0000 (22:26 +0100)
committer Joerg Roedel <[email protected]>
Sun, 31 Oct 2021 21:26:53 +0000 (22:26 +0100)
drivers/iommu/apple-dart.c
drivers/iommu/dma-iommu.c

index fdfa39ec2a4d4a50d87d5fa9502efc7b1170bc61,f0f4d1f74f9222c6ac33331e89f1f56f11dc5f0f,559db9259e65c76ebf1eef2e02dd2c61be2de929,cdc2e83b21864b99db8b7dbb31a21b6cc277c5bd,559db9259e65c76ebf1eef2e02dd2c61be2de929,559db9259e65c76ebf1eef2e02dd2c61be2de929,fdfa39ec2a4d4a50d87d5fa9502efc7b1170bc61,559db9259e65c76ebf1eef2e02dd2c61be2de929,fdfa39ec2a4d4a50d87d5fa9502efc7b1170bc61,559db9259e65c76ebf1eef2e02dd2c61be2de929..96d4a1f8de79777ec641e5c81c1094b5d28eae2c
          #include <linux/bitfield.h>
          #include <linux/clk.h>
          #include <linux/dev_printk.h>
--- ------#include <linux/dma-iommu.h>
          #include <linux/dma-mapping.h>
          #include <linux/err.h>
          #include <linux/interrupt.h>
          #define DART_ERROR_ADDR_HI 0x54
          #define DART_ERROR_ADDR_LO 0x50
          
+ ++++++++#define DART_STREAMS_ENABLE 0xfc
+ ++++++++
          #define DART_TCR(sid) (0x100 + 4 * (sid))
          #define DART_TCR_TRANSLATE_ENABLE BIT(7)
          #define DART_TCR_BYPASS0_ENABLE BIT(8)
@@@@@@@@@@@ -183,6 -185,6 -183,7 -182,7 -183,7 -183,7 -183,6 -183,7 -183,6 -183,7 +184,6 @@@@@@@@@@@ struct apple_dart_master_cfg 
          
          static struct platform_driver apple_dart_driver;
          static const struct iommu_ops apple_dart_iommu_ops;
  ---- - -static const struct iommu_flush_ops apple_dart_tlb_ops;
          
          static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom)
          {
@@@@@@@@@@@ -301,6 -303,9 -302,6 -301,6 -302,6 -302,6 -301,6 -302,6 -301,6 -302,6 +302,9 @@@@@@@@@@@ static int apple_dart_hw_reset(struct a
                apple_dart_hw_disable_dma(&stream_map);
                apple_dart_hw_clear_all_ttbrs(&stream_map);
          
+ ++++++++      /* enable all streams globally since TCR is used to control isolation */
+ ++++++++      writel(DART_STREAM_ALL, dart->regs + DART_STREAMS_ENABLE);
+ ++++++++
                /* clear any pending errors before the interrupt is unmasked */
                writel(readl(dart->regs + DART_ERROR), dart->regs + DART_ERROR);
          
@@@@@@@@@@@ -337,6 -342,6 -338,22 -337,22 -338,22 -338,22 -337,6 -338,22 -337,6 -338,22 +341,6 @@@@@@@@@@@ static void apple_dart_iotlb_sync_map(s
                apple_dart_domain_flush_tlb(to_dart_domain(domain));
          }
          
  ---- - -static void apple_dart_tlb_flush_all(void *cookie)
  ---- - -{
  ---- - -      apple_dart_domain_flush_tlb(cookie);
  ---- - -}
  ---- - -
  ---- - -static void apple_dart_tlb_flush_walk(unsigned long iova, size_t size,
  ---- - -                                    size_t granule, void *cookie)
  ---- - -{
  ---- - -      apple_dart_domain_flush_tlb(cookie);
  ---- - -}
  ---- - -
  ---- - -static const struct iommu_flush_ops apple_dart_tlb_ops = {
  ---- - -      .tlb_flush_all = apple_dart_tlb_flush_all,
  ---- - -      .tlb_flush_walk = apple_dart_tlb_flush_walk,
  ---- - -};
  ---- - -
          static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain,
                                                   dma_addr_t iova)
          {
@@@@@@@@@@@ -418,6 -423,6 -435,7 -434,7 -435,7 -435,7 -418,6 -435,7 -418,6 -435,7 +422,6 @@@@@@@@@@@ static int apple_dart_finalize_domain(s
                        .ias = 32,
                        .oas = 36,
                        .coherent_walk = 1,
  ---- - -              .tlb = &apple_dart_tlb_ops,
                        .iommu_dev = dart->dev,
                };
          
@@@@@@@@@@@ -578,7 -583,7 -596,7 -595,6 -596,7 -596,7 -578,7 -596,7 -578,7 -596,7 +582,6 @@@@@@@@@@@ static struct iommu_domain *apple_dart_
                if (!dart_domain)
                        return NULL;
          
--- ------      iommu_get_dma_cookie(&dart_domain->domain);
                mutex_init(&dart_domain->init_lock);
          
                /* no need to allocate pgtbl_ops or do any other finalization steps */
@@@@@@@@@@@ -643,34 -648,34 -661,16 -659,16 -661,16 -661,16 -643,34 -661,16 -643,34 -661,16 +646,34 @@@@@@@@@@@ static int apple_dart_of_xlate(struct d
                return -EINVAL;
          }
          
  ++++ + +static DEFINE_MUTEX(apple_dart_groups_lock);
  ++++ + +
  ++++ + +static void apple_dart_release_group(void *iommu_data)
  ++++ + +{
  ++++ + +      int i, sid;
  ++++ + +      struct apple_dart_stream_map *stream_map;
  ++++ + +      struct apple_dart_master_cfg *group_master_cfg = iommu_data;
  ++++ + +
  ++++ + +      mutex_lock(&apple_dart_groups_lock);
  ++++ + +
  ++++ + +      for_each_stream_map(i, group_master_cfg, stream_map)
  ++++ + +              for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
  ++++ + +                      stream_map->dart->sid2group[sid] = NULL;
  ++++ + +
  ++++ + +      kfree(iommu_data);
  ++++ + +      mutex_unlock(&apple_dart_groups_lock);
  ++++ + +}
  ++++ + +
          static struct iommu_group *apple_dart_device_group(struct device *dev)
          {
  ---- - -      static DEFINE_MUTEX(lock);
                int i, sid;
                struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
                struct apple_dart_stream_map *stream_map;
  ++++ + +      struct apple_dart_master_cfg *group_master_cfg;
                struct iommu_group *group = NULL;
                struct iommu_group *res = ERR_PTR(-EINVAL);
          
  ---- - -      mutex_lock(&lock);
  ++++ + +      mutex_lock(&apple_dart_groups_lock);
          
                for_each_stream_map(i, cfg, stream_map) {
                        for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) {
          #endif
                        group = generic_device_group(dev);
          
-     - -       group_master_cfg = kzalloc(sizeof(*group_master_cfg), GFP_KERNEL);
  ++++ + +      res = ERR_PTR(-ENOMEM);
  ++++ + +      if (!group)
  ++++ + +              goto out;
  ++++ + +
-     - -       memcpy(group_master_cfg, cfg, sizeof(*group_master_cfg));
+ ++++++++      group_master_cfg = kmemdup(cfg, sizeof(*group_master_cfg), GFP_KERNEL);
  ++++ + +      if (!group_master_cfg) {
  ++++ + +              iommu_group_put(group);
  ++++ + +              goto out;
  ++++ + +      }
  ++++ + +
  ++++ + +      iommu_group_set_iommudata(group, group_master_cfg,
  ++++ + +              apple_dart_release_group);
  ++++ + +
                for_each_stream_map(i, cfg, stream_map)
                        for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
                                stream_map->dart->sid2group[sid] = group;
                res = group;
          
          out:
  ---- - -      mutex_unlock(&lock);
  ++++ + +      mutex_unlock(&apple_dart_groups_lock);
                return res;
          }
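
The apple-dart.c hunks above replace the function-local lock in apple_dart_device_group() with the file-scope apple_dart_groups_lock, duplicate the master config via kmemdup() and attach it to the group with iommu_group_set_iommudata(), whose release callback apple_dart_release_group() clears the sid2group back-pointers before freeing the copy. Below is a minimal userspace sketch of that ownership pattern, not kernel code: the mock_* types, release_group(), groups_lock and MAX_STREAMS are hypothetical stand-ins, and pthread/malloc replace the kernel mutex and kmemdup().

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_STREAMS 16

struct mock_dart {
	void *sid2group[MAX_STREAMS];   /* back-pointers filled in at group creation */
};

struct mock_cfg {
	struct mock_dart *dart;
	unsigned long sidmap;           /* bitmap of stream IDs owned by the group */
};

static pthread_mutex_t groups_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of apple_dart_release_group(): clear the back-pointers under the
 * same lock that guarded their creation, then free the group's private copy. */
static void release_group(void *iommu_data)
{
	struct mock_cfg *cfg = iommu_data;
	int sid;

	pthread_mutex_lock(&groups_lock);
	for (sid = 0; sid < MAX_STREAMS; sid++)
		if (cfg->sidmap & (1UL << sid))
			cfg->dart->sid2group[sid] = NULL;
	free(cfg);
	pthread_mutex_unlock(&groups_lock);
}

int main(void)
{
	struct mock_dart dart = { { NULL } };
	struct mock_cfg master = { .dart = &dart, .sidmap = 0xAUL }; /* sids 1 and 3 */
	void *group = &dart;            /* stand-in for a struct iommu_group */
	int sid;

	/* Analogue of kmemdup() + iommu_group_set_iommudata(): the group keeps
	 * its own copy, so the per-device cfg's lifetime no longer matters. */
	struct mock_cfg *copy = malloc(sizeof(*copy));
	if (!copy)
		return 1;
	memcpy(copy, &master, sizeof(*copy));

	pthread_mutex_lock(&groups_lock);
	for (sid = 0; sid < MAX_STREAMS; sid++)
		if (copy->sidmap & (1UL << sid))
			dart.sid2group[sid] = group;
	pthread_mutex_unlock(&groups_lock);

	release_group(copy);            /* group teardown path */
	printf("sid 1 back-pointer after release: %p\n", dart.sid2group[1]);
	return 0;
}

The point the sketch makes is that clearing sid2group[] inside the release callback, under the same apple_dart_groups_lock, means apple_dart_device_group() can never pick up a group whose backing data has already been freed.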
          
index 896bea04c347e6719198705b7f13c2d0e605ff68,896bea04c347e6719198705b7f13c2d0e605ff68,896bea04c347e6719198705b7f13c2d0e605ff68,26cb95d3830a36f9b3ea91311ee3105c10e10e9b,896bea04c347e6719198705b7f13c2d0e605ff68,896bea04c347e6719198705b7f13c2d0e605ff68,3e5a21b0bb24e67821e84654ab965612aff32dd6,896bea04c347e6719198705b7f13c2d0e605ff68,896bea04c347e6719198705b7f13c2d0e605ff68,fffa8721a8f01f7bd15d8d194c8840f7f96e115b..b42e38a0dbe26eb4981a64910eac43f3054e7266
@@@@@@@@@@@ -98,9 -98,9 -98,9 -98,6 -98,9 -98,9 -98,9 -98,9 -98,9 -98,9 +98,6 @@@@@@@@@@@ static struct iommu_dma_cookie *cookie_
          /**
           * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
           * @domain: IOMMU domain to prepare for DMA-API usage
--- ------ *
--- ------ * IOMMU drivers should normally call this from their domain_alloc
--- ------ * callback when domain->type == IOMMU_DOMAIN_DMA.
           */
          int iommu_get_dma_cookie(struct iommu_domain *domain)
          {
          
                return 0;
          }
--- ------EXPORT_SYMBOL(iommu_get_dma_cookie);
          
          /**
           * iommu_get_msi_cookie - Acquire just MSI remapping resources
@@@@@@@@@@@ -151,8 -151,8 -151,8 -147,6 -151,8 -151,8 -151,8 -151,8 -151,8 -151,8 +147,6 @@@@@@@@@@@ EXPORT_SYMBOL(iommu_get_msi_cookie)
           * iommu_put_dma_cookie - Release a domain's DMA mapping resources
           * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
           *          iommu_get_msi_cookie()
--- ------ *
--- ------ * IOMMU drivers should normally call this from their domain_free callback.
           */
          void iommu_put_dma_cookie(struct iommu_domain *domain)
          {
                kfree(cookie);
                domain->iova_cookie = NULL;
          }
--- ------EXPORT_SYMBOL(iommu_put_dma_cookie);
          
          /**
           * iommu_dma_get_resv_regions - Reserved region driver helper
@@@@@@@@@@@ -317,6 -317,6 -317,6 -310,6 -317,6 -317,6 -317,6 -317,6 -317,6 -317,11 +310,11 @@@@@@@@@@@ static bool dev_is_untrusted(struct dev
                return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
          }
          
+++++++++ static bool dev_use_swiotlb(struct device *dev)
+++++++++ {
+++++++++       return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
+++++++++ }
+++++++++ 
          /* sysfs updates are serialised by the mutex of the group owning @domain */
          int iommu_dma_init_fq(struct iommu_domain *domain)
          {
@@@@@@@@@@@ -510,23 -510,23 -510,23 -503,23 -510,23 -510,23 -510,23 -510,23 -510,23 -515,6 +508,6 @@@@@@@@@@@ static void __iommu_dma_unmap(struct de
                iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
          }
          
--------- static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
---------               size_t size, enum dma_data_direction dir,
---------               unsigned long attrs)
--------- {
---------       struct iommu_domain *domain = iommu_get_dma_domain(dev);
---------       phys_addr_t phys;
--------- 
---------       phys = iommu_iova_to_phys(domain, dma_addr);
---------       if (WARN_ON(!phys))
---------               return;
--------- 
---------       __iommu_dma_unmap(dev, dma_addr, size);
--------- 
---------       if (unlikely(is_swiotlb_buffer(dev, phys)))
---------               swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
--------- }
--------- 
          static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
                        size_t size, int prot, u64 dma_mask)
          {
                return iova + iova_off;
          }
          
--------- static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
---------               size_t org_size, dma_addr_t dma_mask, bool coherent,
---------               enum dma_data_direction dir, unsigned long attrs)
--------- {
---------       int prot = dma_info_to_prot(dir, coherent, attrs);
---------       struct iommu_domain *domain = iommu_get_dma_domain(dev);
---------       struct iommu_dma_cookie *cookie = domain->iova_cookie;
---------       struct iova_domain *iovad = &cookie->iovad;
---------       size_t aligned_size = org_size;
---------       void *padding_start;
---------       size_t padding_size;
---------       dma_addr_t iova;
--------- 
---------       /*
---------        * If both the physical buffer start address and size are
---------        * page aligned, we don't need to use a bounce page.
---------        */
---------       if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
---------           iova_offset(iovad, phys | org_size)) {
---------               aligned_size = iova_align(iovad, org_size);
---------               phys = swiotlb_tbl_map_single(dev, phys, org_size,
---------                                             aligned_size, dir, attrs);
--------- 
---------               if (phys == DMA_MAPPING_ERROR)
---------                       return DMA_MAPPING_ERROR;
--------- 
---------               /* Cleanup the padding area. */
---------               padding_start = phys_to_virt(phys);
---------               padding_size = aligned_size;
--------- 
---------               if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
---------                   (dir == DMA_TO_DEVICE ||
---------                    dir == DMA_BIDIRECTIONAL)) {
---------                       padding_start += org_size;
---------                       padding_size -= org_size;
---------               }
--------- 
---------               memset(padding_start, 0, padding_size);
---------       }
--------- 
---------       iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
---------       if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
---------               swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
---------       return iova;
--------- }
--------- 
          static void __iommu_dma_free_pages(struct page **pages, int count)
          {
                while (count--)
@@@@@@@@@@@ -616,7 -616,7 -616,7 -609,7 -616,7 -616,7 -616,7 -616,7 -616,7 -558,7 +551,7 @@@@@@@@@@@ static struct page **__iommu_dma_alloc_
                if (!order_mask)
                        return NULL;
          
---------       pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
+++++++++       pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
                if (!pages)
                        return NULL;
          
@@@@@@@@@@@ -794,7 -794,7 -794,7 -787,7 -794,7 -794,7 -794,7 -794,7 -794,7 -736,7 +729,7 @@@@@@@@@@@ static void iommu_dma_sync_single_for_c
          {
                phys_addr_t phys;
          
---------       if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
+++++++++       if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
                        return;
          
                phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
@@@@@@@@@@@ -810,7 -810,7 -810,7 -803,7 -810,7 -810,7 -810,7 -810,7 -810,7 -752,7 +745,7 @@@@@@@@@@@ static void iommu_dma_sync_single_for_d
          {
                phys_addr_t phys;
          
---------       if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
+++++++++       if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
                        return;
          
                phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
@@@@@@@@@@@ -828,17 -828,17 -828,17 -821,17 -828,17 -828,17 -828,17 -828,17 -828,17 -770,13 +763,13 @@@@@@@@@@@ static void iommu_dma_sync_sg_for_cpu(s
                struct scatterlist *sg;
                int i;
          
---------       if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
---------               return;
--------- 
---------       for_each_sg(sgl, sg, nelems, i) {
---------               if (!dev_is_dma_coherent(dev))
+++++++++       if (dev_use_swiotlb(dev))
+++++++++               for_each_sg(sgl, sg, nelems, i)
+++++++++                       iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+++++++++                                                     sg->length, dir);
+++++++++       else if (!dev_is_dma_coherent(dev))
+++++++++               for_each_sg(sgl, sg, nelems, i)
                                arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
--------- 
---------               if (is_swiotlb_buffer(dev, sg_phys(sg)))
---------                       swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
---------                                                   sg->length, dir);
---------       }
          }
          
          static void iommu_dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sg;
                int i;
          
---------       if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
---------               return;
--------- 
---------       for_each_sg(sgl, sg, nelems, i) {
---------               if (is_swiotlb_buffer(dev, sg_phys(sg)))
---------                       swiotlb_sync_single_for_device(dev, sg_phys(sg),
---------                                                      sg->length, dir);
--------- 
---------               if (!dev_is_dma_coherent(dev))
+++++++++       if (dev_use_swiotlb(dev))
+++++++++               for_each_sg(sgl, sg, nelems, i)
+++++++++                       iommu_dma_sync_single_for_device(dev,
+++++++++                                                        sg_dma_address(sg),
+++++++++                                                        sg->length, dir);
+++++++++       else if (!dev_is_dma_coherent(dev))
+++++++++               for_each_sg(sgl, sg, nelems, i)
                                arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
---------       }
          }
          
          static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
          {
                phys_addr_t phys = page_to_phys(page) + offset;
                bool coherent = dev_is_dma_coherent(dev);
---------       dma_addr_t dma_handle;
+++++++++       int prot = dma_info_to_prot(dir, coherent, attrs);
+++++++++       struct iommu_domain *domain = iommu_get_dma_domain(dev);
+++++++++       struct iommu_dma_cookie *cookie = domain->iova_cookie;
+++++++++       struct iova_domain *iovad = &cookie->iovad;
+++++++++       dma_addr_t iova, dma_mask = dma_get_mask(dev);
+++++++++ 
+++++++++       /*
+++++++++        * If both the physical buffer start address and size are
+++++++++        * page aligned, we don't need to use a bounce page.
+++++++++        */
+++++++++       if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) {
+++++++++               void *padding_start;
+++++++++               size_t padding_size, aligned_size;
+++++++++ 
+++++++++               aligned_size = iova_align(iovad, size);
+++++++++               phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
+++++++++                                             iova_mask(iovad), dir, attrs);
+++++++++ 
+++++++++               if (phys == DMA_MAPPING_ERROR)
+++++++++                       return DMA_MAPPING_ERROR;
      +   
------ --       dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
------ --                       coherent, dir, attrs);
------ --       if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
------ --           dma_handle != DMA_MAPPING_ERROR)
+++++++++               /* Cleanup the padding area. */
+++++++++               padding_start = phys_to_virt(phys);
+++++++++               padding_size = aligned_size;
+++++++++ 
+++++++++               if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+++++++++                   (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
+++++++++                       padding_start += size;
+++++++++                       padding_size -= size;
+++++++++               }
+++++++++ 
+++++++++               memset(padding_start, 0, padding_size);
+++++++++       }
++++++ ++ 
      -         dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
      -                         coherent, dir, attrs);
      -         if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
      -             dma_handle != DMA_MAPPING_ERROR)
+++++++++       if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                        arch_sync_dma_for_device(phys, size, dir);
---------       return dma_handle;
+++++++++ 
+++++++++       iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
+++++++++       if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
+++++++++               swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
+++++++++       return iova;
          }
          
          static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir, unsigned long attrs)
          {
---------       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
---------               iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
---------       __iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
+++++++++       struct iommu_domain *domain = iommu_get_dma_domain(dev);
+++++++++       phys_addr_t phys;
+++++++++ 
+++++++++       phys = iommu_iova_to_phys(domain, dma_handle);
+++++++++       if (WARN_ON(!phys))
+++++++++               return;
+++++++++ 
+++++++++       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
+++++++++               arch_sync_dma_for_cpu(phys, size, dir);
+++++++++ 
+++++++++       __iommu_dma_unmap(dev, dma_handle, size);
+++++++++ 
+++++++++       if (unlikely(is_swiotlb_buffer(dev, phys)))
+++++++++               swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
          }
          
          /*
@@@@@@@@@@@ -967,7 -967,7 -967,7 -960,7 -967,7 -967,7 -967,7 -967,7 -967,7 -946,7 +939,7 @@@@@@@@@@@ static void iommu_dma_unmap_sg_swiotlb(
                int i;
          
                for_each_sg(sg, s, nents, i)
---------               __iommu_dma_unmap_swiotlb(dev, sg_dma_address(s),
+++++++++               iommu_dma_unmap_page(dev, sg_dma_address(s),
                                        sg_dma_len(s), dir, attrs);
          }
          
@@@@@@@@@@@ -978,9 -978,9 -978,9 -971,9 -978,9 -978,9 -978,9 -978,9 -978,9 -957,8 +950,8 @@@@@@@@@@@ static int iommu_dma_map_sg_swiotlb(str
                int i;
          
                for_each_sg(sg, s, nents, i) {
---------               sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s),
---------                               s->length, dma_get_mask(dev),
---------                               dev_is_dma_coherent(dev), dir, attrs);
+++++++++               sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
+++++++++                               s->offset, s->length, dir, attrs);
                        if (sg_dma_address(s) == DMA_MAPPING_ERROR)
                                goto out_unmap;
                        sg_dma_len(s) = s->length;
@@@@@@@@@@@ -1016,15 -1016,15 -1016,15 -1009,15 -1016,15 -1016,15 -1016,16 -1016,15 -1016,15 -994,15 +987,16 @@@@@@@@@@@ static int iommu_dma_map_sg(struct devi
          
                if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
                        ret = iommu_deferred_attach(dev, domain);
------ ---              goto out;
++++++ +++              if (ret)
++++++ +++                      goto out;
                }
          
+++++++++       if (dev_use_swiotlb(dev))
+++++++++               return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
+++++++++ 
                if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                        iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
          
---------       if (dev_is_untrusted(dev))
---------               return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
--------- 
                /*
                 * Work out how much IOVA space we need, and align the segments to
                 * IOVA granules for the IOMMU driver to handle. With some clever
@@@@@@@@@@@ -1097,14 -1097,14 -1097,14 -1090,14 -1097,14 -1097,14 -1098,14 -1097,14 -1097,14 -1075,14 +1069,14 @@@@@@@@@@@ static void iommu_dma_unmap_sg(struct d
                struct scatterlist *tmp;
                int i;
          
---------       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
---------               iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
--------- 
---------       if (dev_is_untrusted(dev)) {
+++++++++       if (dev_use_swiotlb(dev)) {
                        iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
                        return;
                }
          
+++++++++       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+++++++++               iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+++++++++ 
                /*
                 * The scatterlist segments are mapped into a single
                 * contiguous IOVA allocation, so this is incredibly easy.
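
The dma-iommu.c hunks above fold the old __iommu_dma_map_swiotlb()/__iommu_dma_unmap_swiotlb() helpers into iommu_dma_map_page()/iommu_dma_unmap_page() and gate bouncing on the new dev_use_swiotlb() helper: a mapping is bounced only when iova_offset(iovad, phys | size) is nonzero, i.e. when either the physical start or the length is not aligned to the IOVA granule. Here is a minimal userspace sketch of that test, assuming a fixed power-of-two granule; need_bounce() and GRANULE are hypothetical names, and only the "phys | size" trick is taken from the diff.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GRANULE 4096UL          /* stand-in for iovad->granule */

/* Equivalent of "iova_offset(iovad, phys | size) != 0": ORing start and
 * length lets a single mask test catch misalignment of either value. */
static bool need_bounce(uint64_t phys, uint64_t size)
{
	return ((phys | size) & (GRANULE - 1)) != 0;
}

int main(void)
{
	printf("%d\n", need_bounce(0x1000, 0x2000)); /* aligned start and size -> 0 */
	printf("%d\n", need_bounce(0x1004, 0x2000)); /* unaligned start        -> 1 */
	printf("%d\n", need_bounce(0x1000, 0x1f00)); /* unaligned size         -> 1 */
	return 0;
}

Compiling and running it prints 0, 1, 1, matching the comment in the hunk above: only a buffer whose start and size are both granule-aligned can skip the bounce page.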