Git Repo - linux.git/commitdiff
Merge tag 'vfio-v5.9-rc1' of git://github.com/awilliam/linux-vfio
authorLinus Torvalds <[email protected]>
Wed, 12 Aug 2020 19:09:36 +0000 (12:09 -0700)
committerLinus Torvalds <[email protected]>
Wed, 12 Aug 2020 19:09:36 +0000 (12:09 -0700)
Pull VFIO updates from Alex Williamson:

 - Inclusive naming updates (Alex Williamson)

 - Intel X550 INTx quirk (Alex Williamson)

 - Error path resched between unmaps (Xiang Zheng)

 - SPAPR IOMMU pin_user_pages() conversion (John Hubbard)

 - Trivial mutex simplification (Alex Williamson)

 - QAT device denylist (Giovanni Cabiddu)

 - type1 IOMMU ioctl refactor (Liu Yi L)

* tag 'vfio-v5.9-rc1' of git://github.com/awilliam/linux-vfio:
  vfio/type1: Refactor vfio_iommu_type1_ioctl()
  vfio/pci: Add QAT devices to denylist
  vfio/pci: Add device denylist
  PCI: Add Intel QuickAssist device IDs
  vfio/pci: Hold igate across releasing eventfd contexts
  vfio/spapr_tce: convert get_user_pages() --> pin_user_pages()
  vfio/type1: Add conditional rescheduling after iommu map failed
  vfio/pci: Add Intel X550 to hidden INTx devices
  vfio: Cleanup allowed driver naming

1  2 
drivers/vfio/vfio_iommu_type1.c
include/linux/pci_ids.h

index 9d41105bfd012861b02d21bf28fffdfa10c71a20,e1c00de50ff9790aadd1b5346bc0a281902df65d..6990fc711a80b4c5ff75ae918740d50307550410
@@@ -425,7 -425,7 +425,7 @@@ static int follow_fault_pfn(struct vm_a
        if (ret) {
                bool unlocked = false;
  
 -              ret = fixup_user_fault(NULL, mm, vaddr,
 +              ret = fixup_user_fault(mm, vaddr,
                                       FAULT_FLAG_REMOTE |
                                       (write_fault ?  FAULT_FLAG_WRITE : 0),
                                       &unlocked);
@@@ -453,7 -453,7 +453,7 @@@ static int vaddr_get_pfn(struct mm_stru
                flags |= FOLL_WRITE;
  
        mmap_read_lock(mm);
 -      ret = pin_user_pages_remote(NULL, mm, vaddr, 1, flags | FOLL_LONGTERM,
 +      ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM,
                                    page, NULL, NULL);
        if (ret == 1) {
                *pfn = page_to_pfn(page[0]);
@@@ -1225,8 -1225,10 +1225,10 @@@ static int vfio_iommu_map(struct vfio_i
        return 0;
  
  unwind:
-       list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
+       list_for_each_entry_continue_reverse(d, &iommu->domain_list, next) {
                iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);
+               cond_resched();
+       }
  
        return ret;
  }
@@@ -2453,6 -2455,23 +2455,23 @@@ static int vfio_domains_have_iommu_cach
        return ret;
  }
  
+ static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
+                                           unsigned long arg)
+ {
+       switch (arg) {
+       case VFIO_TYPE1_IOMMU:
+       case VFIO_TYPE1v2_IOMMU:
+       case VFIO_TYPE1_NESTING_IOMMU:
+               return 1;
+       case VFIO_DMA_CC_IOMMU:
+               if (!iommu)
+                       return 0;
+               return vfio_domains_have_iommu_cache(iommu);
+       default:
+               return 0;
+       }
+ }
  static int vfio_iommu_iova_add_cap(struct vfio_info_cap *caps,
                 struct vfio_iommu_type1_info_cap_iova_range *cap_iovas,
                 size_t size)
@@@ -2529,241 -2548,256 +2548,256 @@@ static int vfio_iommu_migration_build_c
        return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig));
  }
  
- static long vfio_iommu_type1_ioctl(void *iommu_data,
-                                  unsigned int cmd, unsigned long arg)
+ static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
+                                    unsigned long arg)
  {
-       struct vfio_iommu *iommu = iommu_data;
+       struct vfio_iommu_type1_info info;
        unsigned long minsz;
+       struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+       unsigned long capsz;
+       int ret;
  
-       if (cmd == VFIO_CHECK_EXTENSION) {
-               switch (arg) {
-               case VFIO_TYPE1_IOMMU:
-               case VFIO_TYPE1v2_IOMMU:
-               case VFIO_TYPE1_NESTING_IOMMU:
-                       return 1;
-               case VFIO_DMA_CC_IOMMU:
-                       if (!iommu)
-                               return 0;
-                       return vfio_domains_have_iommu_cache(iommu);
-               default:
-                       return 0;
-               }
-       } else if (cmd == VFIO_IOMMU_GET_INFO) {
-               struct vfio_iommu_type1_info info;
-               struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
-               unsigned long capsz;
-               int ret;
-               minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
+       minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
  
-               /* For backward compatibility, cannot require this */
-               capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
+       /* For backward compatibility, cannot require this */
+       capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
  
-               if (copy_from_user(&info, (void __user *)arg, minsz))
-                       return -EFAULT;
+       if (copy_from_user(&info, (void __user *)arg, minsz))
+               return -EFAULT;
  
-               if (info.argsz < minsz)
-                       return -EINVAL;
+       if (info.argsz < minsz)
+               return -EINVAL;
  
-               if (info.argsz >= capsz) {
-                       minsz = capsz;
-                       info.cap_offset = 0; /* output, no-recopy necessary */
-               }
+       if (info.argsz >= capsz) {
+               minsz = capsz;
+               info.cap_offset = 0; /* output, no-recopy necessary */
+       }
  
-               mutex_lock(&iommu->lock);
-               info.flags = VFIO_IOMMU_INFO_PGSIZES;
+       mutex_lock(&iommu->lock);
+       info.flags = VFIO_IOMMU_INFO_PGSIZES;
  
-               info.iova_pgsizes = iommu->pgsize_bitmap;
+       info.iova_pgsizes = iommu->pgsize_bitmap;
  
-               ret = vfio_iommu_migration_build_caps(iommu, &caps);
+       ret = vfio_iommu_migration_build_caps(iommu, &caps);
  
-               if (!ret)
-                       ret = vfio_iommu_iova_build_caps(iommu, &caps);
+       if (!ret)
+               ret = vfio_iommu_iova_build_caps(iommu, &caps);
  
-               mutex_unlock(&iommu->lock);
+       mutex_unlock(&iommu->lock);
  
-               if (ret)
-                       return ret;
+       if (ret)
+               return ret;
  
-               if (caps.size) {
-                       info.flags |= VFIO_IOMMU_INFO_CAPS;
+       if (caps.size) {
+               info.flags |= VFIO_IOMMU_INFO_CAPS;
  
-                       if (info.argsz < sizeof(info) + caps.size) {
-                               info.argsz = sizeof(info) + caps.size;
-                       } else {
-                               vfio_info_cap_shift(&caps, sizeof(info));
-                               if (copy_to_user((void __user *)arg +
-                                               sizeof(info), caps.buf,
-                                               caps.size)) {
-                                       kfree(caps.buf);
-                                       return -EFAULT;
-                               }
-                               info.cap_offset = sizeof(info);
+               if (info.argsz < sizeof(info) + caps.size) {
+                       info.argsz = sizeof(info) + caps.size;
+               } else {
+                       vfio_info_cap_shift(&caps, sizeof(info));
+                       if (copy_to_user((void __user *)arg +
+                                       sizeof(info), caps.buf,
+                                       caps.size)) {
+                               kfree(caps.buf);
+                               return -EFAULT;
                        }
-                       kfree(caps.buf);
+                       info.cap_offset = sizeof(info);
                }
  
-               return copy_to_user((void __user *)arg, &info, minsz) ?
-                       -EFAULT : 0;
+               kfree(caps.buf);
+       }
  
-       } else if (cmd == VFIO_IOMMU_MAP_DMA) {
-               struct vfio_iommu_type1_dma_map map;
-               uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
-                               VFIO_DMA_MAP_FLAG_WRITE;
+       return copy_to_user((void __user *)arg, &info, minsz) ?
+                       -EFAULT : 0;
+ }
  
-               minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
+ static int vfio_iommu_type1_map_dma(struct vfio_iommu *iommu,
+                                   unsigned long arg)
+ {
+       struct vfio_iommu_type1_dma_map map;
+       unsigned long minsz;
+       uint32_t mask = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
  
-               if (copy_from_user(&map, (void __user *)arg, minsz))
-                       return -EFAULT;
+       minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
  
-               if (map.argsz < minsz || map.flags & ~mask)
-                       return -EINVAL;
+       if (copy_from_user(&map, (void __user *)arg, minsz))
+               return -EFAULT;
  
-               return vfio_dma_do_map(iommu, &map);
+       if (map.argsz < minsz || map.flags & ~mask)
+               return -EINVAL;
  
-       } else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
-               struct vfio_iommu_type1_dma_unmap unmap;
-               struct vfio_bitmap bitmap = { 0 };
-               int ret;
+       return vfio_dma_do_map(iommu, &map);
+ }
  
-               minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
+ static int vfio_iommu_type1_unmap_dma(struct vfio_iommu *iommu,
+                                     unsigned long arg)
+ {
+       struct vfio_iommu_type1_dma_unmap unmap;
+       struct vfio_bitmap bitmap = { 0 };
+       unsigned long minsz;
+       int ret;
  
-               if (copy_from_user(&unmap, (void __user *)arg, minsz))
-                       return -EFAULT;
+       minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
  
-               if (unmap.argsz < minsz ||
-                   unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP)
-                       return -EINVAL;
+       if (copy_from_user(&unmap, (void __user *)arg, minsz))
+               return -EFAULT;
  
-               if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
-                       unsigned long pgshift;
+       if (unmap.argsz < minsz ||
+           unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP)
+               return -EINVAL;
  
-                       if (unmap.argsz < (minsz + sizeof(bitmap)))
-                               return -EINVAL;
+       if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
+               unsigned long pgshift;
  
-                       if (copy_from_user(&bitmap,
-                                          (void __user *)(arg + minsz),
-                                          sizeof(bitmap)))
-                               return -EFAULT;
+               if (unmap.argsz < (minsz + sizeof(bitmap)))
+                       return -EINVAL;
  
-                       if (!access_ok((void __user *)bitmap.data, bitmap.size))
-                               return -EINVAL;
+               if (copy_from_user(&bitmap,
+                                  (void __user *)(arg + minsz),
+                                  sizeof(bitmap)))
+                       return -EFAULT;
  
-                       pgshift = __ffs(bitmap.pgsize);
-                       ret = verify_bitmap_size(unmap.size >> pgshift,
-                                                bitmap.size);
-                       if (ret)
-                               return ret;
-               }
+               if (!access_ok((void __user *)bitmap.data, bitmap.size))
+                       return -EINVAL;
  
-               ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap);
+               pgshift = __ffs(bitmap.pgsize);
+               ret = verify_bitmap_size(unmap.size >> pgshift,
+                                        bitmap.size);
                if (ret)
                        return ret;
+       }
+       ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap);
+       if (ret)
+               return ret;
  
-               return copy_to_user((void __user *)arg, &unmap, minsz) ?
+       return copy_to_user((void __user *)arg, &unmap, minsz) ?
                        -EFAULT : 0;
-       } else if (cmd == VFIO_IOMMU_DIRTY_PAGES) {
-               struct vfio_iommu_type1_dirty_bitmap dirty;
-               uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
-                               VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
-                               VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
-               int ret = 0;
+ }
  
-               if (!iommu->v2)
-                       return -EACCES;
+ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
+                                       unsigned long arg)
+ {
+       struct vfio_iommu_type1_dirty_bitmap dirty;
+       uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
+                       VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
+                       VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
+       unsigned long minsz;
+       int ret = 0;
  
-               minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap,
-                                   flags);
+       if (!iommu->v2)
+               return -EACCES;
  
-               if (copy_from_user(&dirty, (void __user *)arg, minsz))
-                       return -EFAULT;
+       minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap, flags);
  
-               if (dirty.argsz < minsz || dirty.flags & ~mask)
-                       return -EINVAL;
+       if (copy_from_user(&dirty, (void __user *)arg, minsz))
+               return -EFAULT;
  
-               /* only one flag should be set at a time */
-               if (__ffs(dirty.flags) != __fls(dirty.flags))
-                       return -EINVAL;
+       if (dirty.argsz < minsz || dirty.flags & ~mask)
+               return -EINVAL;
  
-               if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
-                       size_t pgsize;
+       /* only one flag should be set at a time */
+       if (__ffs(dirty.flags) != __fls(dirty.flags))
+               return -EINVAL;
  
-                       mutex_lock(&iommu->lock);
-                       pgsize = 1 << __ffs(iommu->pgsize_bitmap);
-                       if (!iommu->dirty_page_tracking) {
-                               ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
-                               if (!ret)
-                                       iommu->dirty_page_tracking = true;
-                       }
-                       mutex_unlock(&iommu->lock);
-                       return ret;
-               } else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
-                       mutex_lock(&iommu->lock);
-                       if (iommu->dirty_page_tracking) {
-                               iommu->dirty_page_tracking = false;
-                               vfio_dma_bitmap_free_all(iommu);
-                       }
-                       mutex_unlock(&iommu->lock);
-                       return 0;
-               } else if (dirty.flags &
-                                VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
-                       struct vfio_iommu_type1_dirty_bitmap_get range;
-                       unsigned long pgshift;
-                       size_t data_size = dirty.argsz - minsz;
-                       size_t iommu_pgsize;
-                       if (!data_size || data_size < sizeof(range))
-                               return -EINVAL;
-                       if (copy_from_user(&range, (void __user *)(arg + minsz),
-                                          sizeof(range)))
-                               return -EFAULT;
+       if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
+               size_t pgsize;
  
-                       if (range.iova + range.size < range.iova)
-                               return -EINVAL;
-                       if (!access_ok((void __user *)range.bitmap.data,
-                                      range.bitmap.size))
-                               return -EINVAL;
+               mutex_lock(&iommu->lock);
+               pgsize = 1 << __ffs(iommu->pgsize_bitmap);
+               if (!iommu->dirty_page_tracking) {
+                       ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
+                       if (!ret)
+                               iommu->dirty_page_tracking = true;
+               }
+               mutex_unlock(&iommu->lock);
+               return ret;
+       } else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
+               mutex_lock(&iommu->lock);
+               if (iommu->dirty_page_tracking) {
+                       iommu->dirty_page_tracking = false;
+                       vfio_dma_bitmap_free_all(iommu);
+               }
+               mutex_unlock(&iommu->lock);
+               return 0;
+       } else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
+               struct vfio_iommu_type1_dirty_bitmap_get range;
+               unsigned long pgshift;
+               size_t data_size = dirty.argsz - minsz;
+               size_t iommu_pgsize;
  
-                       pgshift = __ffs(range.bitmap.pgsize);
-                       ret = verify_bitmap_size(range.size >> pgshift,
-                                                range.bitmap.size);
-                       if (ret)
-                               return ret;
+               if (!data_size || data_size < sizeof(range))
+                       return -EINVAL;
  
-                       mutex_lock(&iommu->lock);
+               if (copy_from_user(&range, (void __user *)(arg + minsz),
+                                  sizeof(range)))
+                       return -EFAULT;
  
-                       iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
+               if (range.iova + range.size < range.iova)
+                       return -EINVAL;
+               if (!access_ok((void __user *)range.bitmap.data,
+                              range.bitmap.size))
+                       return -EINVAL;
  
-                       /* allow only smallest supported pgsize */
-                       if (range.bitmap.pgsize != iommu_pgsize) {
-                               ret = -EINVAL;
-                               goto out_unlock;
-                       }
-                       if (range.iova & (iommu_pgsize - 1)) {
-                               ret = -EINVAL;
-                               goto out_unlock;
-                       }
-                       if (!range.size || range.size & (iommu_pgsize - 1)) {
-                               ret = -EINVAL;
-                               goto out_unlock;
-                       }
+               pgshift = __ffs(range.bitmap.pgsize);
+               ret = verify_bitmap_size(range.size >> pgshift,
+                                        range.bitmap.size);
+               if (ret)
+                       return ret;
  
-                       if (iommu->dirty_page_tracking)
-                               ret = vfio_iova_dirty_bitmap(range.bitmap.data,
-                                               iommu, range.iova, range.size,
-                                               range.bitmap.pgsize);
-                       else
-                               ret = -EINVAL;
- out_unlock:
-                       mutex_unlock(&iommu->lock);
+               mutex_lock(&iommu->lock);
  
-                       return ret;
+               iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
+               /* allow only smallest supported pgsize */
+               if (range.bitmap.pgsize != iommu_pgsize) {
+                       ret = -EINVAL;
+                       goto out_unlock;
+               }
+               if (range.iova & (iommu_pgsize - 1)) {
+                       ret = -EINVAL;
+                       goto out_unlock;
+               }
+               if (!range.size || range.size & (iommu_pgsize - 1)) {
+                       ret = -EINVAL;
+                       goto out_unlock;
                }
+               if (iommu->dirty_page_tracking)
+                       ret = vfio_iova_dirty_bitmap(range.bitmap.data,
+                                                    iommu, range.iova,
+                                                    range.size,
+                                                    range.bitmap.pgsize);
+               else
+                       ret = -EINVAL;
+ out_unlock:
+               mutex_unlock(&iommu->lock);
+               return ret;
        }
  
-       return -ENOTTY;
+       return -EINVAL;
+ }
+ static long vfio_iommu_type1_ioctl(void *iommu_data,
+                                  unsigned int cmd, unsigned long arg)
+ {
+       struct vfio_iommu *iommu = iommu_data;
+       switch (cmd) {
+       case VFIO_CHECK_EXTENSION:
+               return vfio_iommu_type1_check_extension(iommu, arg);
+       case VFIO_IOMMU_GET_INFO:
+               return vfio_iommu_type1_get_info(iommu, arg);
+       case VFIO_IOMMU_MAP_DMA:
+               return vfio_iommu_type1_map_dma(iommu, arg);
+       case VFIO_IOMMU_UNMAP_DMA:
+               return vfio_iommu_type1_unmap_dma(iommu, arg);
+       case VFIO_IOMMU_DIRTY_PAGES:
+               return vfio_iommu_type1_dirty_pages(iommu, arg);
+       default:
+               return -ENOTTY;
+       }
  }
  
  static int vfio_iommu_type1_register_notifier(void *iommu_data,
diff --combined include/linux/pci_ids.h
index 5c709a1450b1152afea2b23ca8f769de9de8a6b2,f3166b1425ca4a0ce354dc974ddc84a5fe048d40..1ab1e24bcbce5946e6d59879bd30109af600bfdd
  
  #define PCI_VENDOR_ID_ASMEDIA         0x1b21
  
 +#define PCI_VENDOR_ID_REDHAT          0x1b36
 +
  #define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS   0x1c36
  
  #define PCI_VENDOR_ID_CIRCUITCO               0x1cc8
  #define PCI_DEVICE_ID_INTEL_80332_1   0x0332
  #define PCI_DEVICE_ID_INTEL_80333_0   0x0370
  #define PCI_DEVICE_ID_INTEL_80333_1   0x0372
+ #define PCI_DEVICE_ID_INTEL_QAT_DH895XCC      0x0435
+ #define PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF   0x0443
  #define PCI_DEVICE_ID_INTEL_82375     0x0482
  #define PCI_DEVICE_ID_INTEL_82424     0x0483
  #define PCI_DEVICE_ID_INTEL_82378     0x0484
  #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI     0x1577
  #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE  0x1578
  #define PCI_DEVICE_ID_INTEL_80960_RP  0x1960
+ #define PCI_DEVICE_ID_INTEL_QAT_C3XXX 0x19e2
+ #define PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF      0x19e3
  #define PCI_DEVICE_ID_INTEL_82840_HB  0x1a21
  #define PCI_DEVICE_ID_INTEL_82845_HB  0x1a30
  #define PCI_DEVICE_ID_INTEL_IOAT      0x1a38
  #define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717
  #define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718
  #define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719
+ #define PCI_DEVICE_ID_INTEL_QAT_C62X  0x37c8
+ #define PCI_DEVICE_ID_INTEL_QAT_C62X_VF       0x37c9
  #define PCI_DEVICE_ID_INTEL_ICH10_0   0x3a14
  #define PCI_DEVICE_ID_INTEL_ICH10_1   0x3a16
  #define PCI_DEVICE_ID_INTEL_ICH10_2   0x3a18
This page took 0.114471 seconds and 4 git commands to generate.