Merge branch 'x86-dax-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author Linus Torvalds <[email protected]>
Tue, 5 Jun 2018 02:23:13 +0000 (19:23 -0700)
committer Linus Torvalds <[email protected]>
Tue, 5 Jun 2018 02:23:13 +0000 (19:23 -0700)
Pull x86 dax updates from Ingo Molnar:
 "This contains x86 memcpy_mcsafe() fault handling improvements the
  nvdimm tree would like to make more use of"

* 'x86-dax-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/asm/memcpy_mcsafe: Define copy_to_iter_mcsafe()
  x86/asm/memcpy_mcsafe: Add write-protection-fault handling
  x86/asm/memcpy_mcsafe: Return bytes remaining
  x86/asm/memcpy_mcsafe: Add labels for __memcpy_mcsafe() write fault handling
  x86/asm/memcpy_mcsafe: Remove loop unrolling
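
As a usage sketch of the new helper named in the list above (not code from this series; the function name and message below are invented for illustration): after these patches a caller can treat the return value of copy_to_iter_mcsafe() as the number of bytes successfully copied, so a short return marks where the poisoned source range begins.

#include <linux/printk.h>
#include <linux/uio.h>

/* Hypothetical consumer of the new copy_to_iter_mcsafe() helper. */
static size_t pmem_copy_to_iter_sketch(void *kaddr, size_t bytes,
				       struct iov_iter *i)
{
	/* Returns bytes copied; stops early on an uncorrectable read error. */
	size_t copied = copy_to_iter_mcsafe(kaddr, bytes, i);

	if (copied < bytes)
		pr_debug("mcsafe copy stopped %zu bytes short\n",
			 bytes - copied);
	return copied;
}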

arch/x86/Kconfig
arch/x86/lib/usercopy_64.c
lib/iov_iter.c

diff --combined arch/x86/Kconfig
index f2ee6a8ffe657fd63377a85757743cb186f8615b,6ca22706cd64e962c4918d148ea3c528bb8ed33f..1fe24b624d44c9cc1611c18fea8ab87ed070d147
@@@ -28,8 -28,6 +28,8 @@@ config X86_64
        select ARCH_USE_CMPXCHG_LOCKREF
        select HAVE_ARCH_SOFT_DIRTY
        select MODULES_USE_ELF_RELA
 +      select NEED_DMA_MAP_STATE
 +      select SWIOTLB
        select X86_DEV_DMA_OPS
        select ARCH_HAS_SYSCALL_WRAPPER
  
@@@ -62,6 -60,7 +62,7 @@@ config X86
        select ARCH_HAS_PMEM_API                if X86_64
        select ARCH_HAS_REFCOUNT
        select ARCH_HAS_UACCESS_FLUSHCACHE      if X86_64
+       select ARCH_HAS_UACCESS_MCSAFE          if X86_64
        select ARCH_HAS_SET_MEMORY
        select ARCH_HAS_SG_CHAIN
        select ARCH_HAS_STRICT_KERNEL_RWX
        select HAVE_C_RECORDMCOUNT
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DEBUG_STACKOVERFLOW
 -      select HAVE_DMA_API_DEBUG
        select HAVE_DMA_CONTIGUOUS
        select HAVE_DYNAMIC_FTRACE
        select HAVE_DYNAMIC_FTRACE_WITH_REGS
        select HAVE_UNSTABLE_SCHED_CLOCK
        select HAVE_USER_RETURN_NOTIFIER
        select IRQ_FORCED_THREADING
 +      select NEED_SG_DMA_LENGTH
        select PCI_LOCKLESS_CONFIG
        select PERF_EVENTS
        select RTC_LIB
@@@ -238,6 -237,13 +239,6 @@@ config ARCH_MMAP_RND_COMPAT_BITS_MAX
  config SBUS
        bool
  
 -config NEED_DMA_MAP_STATE
 -      def_bool y
 -      depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG || SWIOTLB
 -
 -config NEED_SG_DMA_LENGTH
 -      def_bool y
 -
  config GENERIC_ISA_DMA
        def_bool y
        depends on ISA_DMA_API
@@@ -870,7 -876,6 +871,7 @@@ config DMI
  
  config GART_IOMMU
        bool "Old AMD GART IOMMU support"
 +      select IOMMU_HELPER
        select SWIOTLB
        depends on X86_64 && PCI && AMD_NB
        ---help---
  
  config CALGARY_IOMMU
        bool "IBM Calgary IOMMU support"
 +      select IOMMU_HELPER
        select SWIOTLB
        depends on X86_64 && PCI
        ---help---
@@@ -920,6 -924,20 +921,6 @@@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
          Calgary anyway, pass 'iommu=calgary' on the kernel command line.
          If unsure, say Y.
  
 -# need this always selected by IOMMU for the VIA workaround
 -config SWIOTLB
 -      def_bool y if X86_64
 -      ---help---
 -        Support for software bounce buffers used on x86-64 systems
 -        which don't have a hardware IOMMU. Using this PCI devices
 -        which can only access 32-bits of memory can be used on systems
 -        with more than 3 GB of memory.
 -        If unsure, say Y.
 -
 -config IOMMU_HELPER
 -      def_bool y
 -      depends on CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU
 -
  config MAXSMP
        bool "Enable Maximum number of SMP Processors and NUMA Nodes"
        depends on X86_64 && SMP && DEBUG_KERNEL
@@@ -1441,7 -1459,6 +1442,7 @@@ config HIGHMEM
  config X86_PAE
        bool "PAE (Physical Address Extension) Support"
        depends on X86_32 && !HIGHMEM4G
 +      select PHYS_ADDR_T_64BIT
        select SWIOTLB
        ---help---
          PAE is required for NX support, and furthermore enables
@@@ -1469,6 -1486,14 +1470,6 @@@ config X86_5LEVEL
  
          Say N if unsure.
  
 -config ARCH_PHYS_ADDR_T_64BIT
 -      def_bool y
 -      depends on X86_64 || X86_PAE
 -
 -config ARCH_DMA_ADDR_T_64BIT
 -      def_bool y
 -      depends on X86_64 || HIGHMEM64G
 -
  config X86_DIRECT_GBPAGES
        def_bool y
        depends on X86_64 && !DEBUG_PAGEALLOC
diff --combined arch/x86/lib/usercopy_64.c
index a624dcc4de104b2b616338334537a034b6b1da4a,7ebc9901dd05986d72c06564b72c9ca5c3788f58..9c5606d88f618f9ef5b8594027d2513f257a1d9f
@@@ -23,13 -23,13 +23,13 @@@ unsigned long __clear_user(void __user *addr, unsigned long size)
        asm volatile(
                "       testq  %[size8],%[size8]\n"
                "       jz     4f\n"
 -              "0:     movq %[zero],(%[dst])\n"
 -              "       addq   %[eight],%[dst]\n"
 +              "0:     movq $0,(%[dst])\n"
 +              "       addq   $8,%[dst]\n"
                "       decl %%ecx ; jnz   0b\n"
                "4:     movq  %[size1],%%rcx\n"
                "       testl %%ecx,%%ecx\n"
                "       jz     2f\n"
 -              "1:     movb   %b[zero],(%[dst])\n"
 +              "1:     movb   $0,(%[dst])\n"
                "       incq   %[dst]\n"
                "       decl %%ecx ; jnz  1b\n"
                "2:\n"
@@@ -40,7 -40,8 +40,7 @@@
                _ASM_EXTABLE(0b,3b)
                _ASM_EXTABLE(1b,2b)
                : [size8] "=&c"(size), [dst] "=&D" (__d0)
 -              : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
 -                [zero] "r" (0UL), [eight] "r" (8UL));
 +              : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
        clac();
        return size;
  }
@@@ -74,6 -75,27 +74,27 @@@ copy_user_handle_tail(char *to, char *from, unsigned len)
        return len;
  }
  
+ /*
+  * Similar to copy_user_handle_tail, probe for the write fault point,
+  * but reuse __memcpy_mcsafe in case a new read error is encountered.
+  * clac() is handled in _copy_to_iter_mcsafe().
+  */
+ __visible unsigned long
+ mcsafe_handle_tail(char *to, char *from, unsigned len)
+ {
+       for (; len; --len, to++, from++) {
+               /*
+                * Call the assembly routine back directly since
+                * memcpy_mcsafe() may silently fallback to memcpy.
+                */
+               unsigned long rem = __memcpy_mcsafe(to, from, 1);
+               if (rem)
+                       break;
+       }
+       return len;
+ }
  #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
  /**
   * clean_cache_range - write back a cache range with CLWB
diff --combined lib/iov_iter.c
index fdae394172fa78efaf3637266492c5a5823ce41a,70ebc8ede143f7599b482cc4e0704fde5ef99188..7e43cd54c84ca3da2d77b02e7112c69386428a2b
@@@ -573,6 -573,67 +573,67 @@@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
  }
  EXPORT_SYMBOL(_copy_to_iter);
  
+ #ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
+ static int copyout_mcsafe(void __user *to, const void *from, size_t n)
+ {
+       if (access_ok(VERIFY_WRITE, to, n)) {
+               kasan_check_read(from, n);
+               n = copy_to_user_mcsafe((__force void *) to, from, n);
+       }
+       return n;
+ }
+ static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
+               const char *from, size_t len)
+ {
+       unsigned long ret;
+       char *to;
+       to = kmap_atomic(page);
+       ret = memcpy_mcsafe(to + offset, from, len);
+       kunmap_atomic(to);
+       return ret;
+ }
+ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
+ {
+       const char *from = addr;
+       unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
+       if (unlikely(i->type & ITER_PIPE)) {
+               WARN_ON(1);
+               return 0;
+       }
+       if (iter_is_iovec(i))
+               might_fault();
+       iterate_and_advance(i, bytes, v,
+               copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
+               ({
+               rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
+                                (from += v.bv_len) - v.bv_len, v.bv_len);
+               if (rem) {
+                       curr_addr = (unsigned long) from;
+                       bytes = curr_addr - s_addr - rem;
+                       return bytes;
+               }
+               }),
+               ({
+               rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
+                               v.iov_len);
+               if (rem) {
+                       curr_addr = (unsigned long) from;
+                       bytes = curr_addr - s_addr - rem;
+                       return bytes;
+               }
+               })
+       )
+       return bytes;
+ }
+ EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
+ #endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
  size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
  {
        char *to = addr;
@@@ -1012,7 -1073,7 +1073,7 @@@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
  }
  EXPORT_SYMBOL(iov_iter_gap_alignment);
  
 -static inline size_t __pipe_get_pages(struct iov_iter *i,
 +static inline ssize_t __pipe_get_pages(struct iov_iter *i,
                                size_t maxsize,
                                struct page **pages,
                                int idx,
@@@ -1102,7 -1163,7 +1163,7 @@@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
                   size_t *start)
  {
        struct page **p;
 -      size_t n;
 +      ssize_t n;
        int idx;
        int npages;
  
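
Taken together, the series makes memcpy_mcsafe() report how many bytes were left uncopied when a fault hit, and _copy_to_iter_mcsafe() above folds that into a bytes-copied total (bytes = curr_addr - s_addr - rem). A freestanding sketch of that accounting, with invented names (memcpy_mcsafe_stub stands in for the real routine; for a single contiguous segment curr_addr - s_addr equals len):

#include <stddef.h>

/*
 * Stand-in with the post-series contract of memcpy_mcsafe(): returns 0
 * when all len bytes were copied, otherwise the number of bytes that
 * remained when the machine-check (or write-protection) fault hit.
 */
unsigned long memcpy_mcsafe_stub(void *dst, const void *src, size_t len);

static size_t bytes_copied_sketch(void *dst, const void *src, size_t len)
{
	unsigned long rem = memcpy_mcsafe_stub(dst, src, len);

	/* Same arithmetic as _copy_to_iter_mcsafe(): len - rem bytes landed. */
	return len - rem;
}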