riscv: lib: vectorize copy_to_user/copy_from_user
author    Andy Chiu <[email protected]>
          Mon, 15 Jan 2024 05:59:24 +0000 (05:59 +0000)
committer Palmer Dabbelt <[email protected]>
          Tue, 16 Jan 2024 15:13:57 +0000 (07:13 -0800)
This patch uses Vector to perform copy_to_user()/copy_from_user(). If
Vector is available and the copy is large enough for Vector to
outperform the scalar code, the kernel does the user copy with Vector.
Although the best practice for users is still to avoid copies where
possible, this provides a faster variant for the copies that are
unavoidable.
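
For reference, a condensed C sketch of that dispatch (copy_dispatch() is
a made-up name; the real check is the REG_L/bltu pair added to
arch/riscv/lib/uaccess.S by this patch):

	/*
	 * Sketch only: take the Vector path when Vector is available and
	 * the copy is at least riscv_v_usercopy_threshold bytes long.
	 */
	static unsigned long copy_dispatch(void *dst, void *src, size_t n)
	{
		if (has_vector() && n >= riscv_v_usercopy_threshold)
			return enter_vector_usercopy(dst, src, n);
		return fallback_scalar_usercopy(dst, src, n);
	}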

The threshold above which Vector is used,
CONFIG_RISCV_ISA_V_UCOPY_THRESHOLD, is only a heuristic for now. We can
add devicetree parsing later if people feel the need to customize it.
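
A minimal sketch of what such parsing might look like follows; the
/chosen property name "riscv,ucopy-threshold" is purely hypothetical and
not part of this patch:

	#include <linux/of.h>

	static void __init riscv_v_setup_ucopy_threshold(void)
	{
		struct device_node *np = of_find_node_by_path("/chosen");
		u32 val;

		/* Hypothetical property; keep the Kconfig default otherwise. */
		if (np && !of_property_read_u32(np, "riscv,ucopy-threshold", &val))
			riscv_v_usercopy_threshold = val;
		of_node_put(np);
	}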

The exception fixup code of __asm_vector_usercopy must fall back to the
scalar routine, because accessing user pages might fault and handling
that fault must be able to sleep. Current kernel-mode Vector does not
allow tasks to be preemptible, so in that case we must deactivate Vector
and perform a scalar fallback.
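
Condensed from the riscv_v_helpers.c hunk below, the fallback logic of
the C helper is roughly:

	if (!may_use_simd())            /* kernel-mode Vector unusable here */
		return fallback_scalar_usercopy(dst, src, n);

	kernel_vector_begin();          /* non-preemptible from here on */
	remain = __asm_vector_usercopy(dst, src, n);
	kernel_vector_end();

	if (remain)                     /* a user access faulted; redo the tail */
		return fallback_scalar_usercopy(dst + n - remain,
						src + n - remain, remain);
	return 0;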

The original implementation of the Vector routines comes from
https://github.com/sifive/sifive-libc, which we have agreed to
contribute to the Linux kernel.

Co-developed-by: Jerry Shih <[email protected]>
Signed-off-by: Jerry Shih <[email protected]>
Co-developed-by: Nick Knight <[email protected]>
Signed-off-by: Nick Knight <[email protected]>
Suggested-by: Guo Ren <[email protected]>
Signed-off-by: Andy Chiu <[email protected]>
Tested-by: Björn Töpel <[email protected]>
Tested-by: Lad Prabhakar <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Palmer Dabbelt <[email protected]>
arch/riscv/Kconfig
arch/riscv/include/asm/asm-prototypes.h
arch/riscv/lib/Makefile
arch/riscv/lib/riscv_v_helpers.c [new file with mode: 0644]
arch/riscv/lib/uaccess.S
arch/riscv/lib/uaccess_vector.S [new file with mode: 0644]

index 95a2a06acc6a62412894e491c3bfd5d4a161d15b..3c5ba05e8a2da812c13605a3be674b2f1be94dcf 100644 (file)
@@ -525,6 +525,14 @@ config RISCV_ISA_V_DEFAULT_ENABLE
 
          If you don't know what to do here, say Y.
 
+config RISCV_ISA_V_UCOPY_THRESHOLD
+       int "Threshold size for vectorized user copies"
+       depends on RISCV_ISA_V
+       default 768
+       help
+         Prefer using vectorized copy_to_user()/copy_from_user() when the
+         workload size exceeds this value.
+
 config TOOLCHAIN_HAS_ZBB
        bool
        default y
index 6db1a9bbff4ce35d1e05261094bb2efa2ef311a1..be438932f321f7d358ad6ebdb5bd7a9123eac8df 100644 (file)
@@ -11,6 +11,10 @@ long long __ashlti3(long long a, int b);
 
 #ifdef CONFIG_RISCV_ISA_V
 
+#ifdef CONFIG_MMU
+asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n);
+#endif /* CONFIG_MMU  */
+
 void xor_regs_2_(unsigned long bytes, unsigned long *__restrict p1,
                 const unsigned long *__restrict p2);
 void xor_regs_3_(unsigned long bytes, unsigned long *__restrict p1,
index 494f9cd1a00c0b83619f622d381e86c5529fd80d..c8a6787d58273a465dd0c12e8217cb4a241d4693 100644 (file)
@@ -6,9 +6,13 @@ lib-y                  += memmove.o
 lib-y                  += strcmp.o
 lib-y                  += strlen.o
 lib-y                  += strncmp.o
-lib-$(CONFIG_MMU)      += uaccess.o
+ifeq ($(CONFIG_MMU), y)
+lib-y                          += uaccess.o
+lib-$(CONFIG_RISCV_ISA_V)      += uaccess_vector.o
+endif
 lib-$(CONFIG_64BIT)    += tishift.o
 lib-$(CONFIG_RISCV_ISA_ZICBOZ) += clear_page.o
 
 obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
 lib-$(CONFIG_RISCV_ISA_V)      += xor.o
+lib-$(CONFIG_RISCV_ISA_V)      += riscv_v_helpers.o
diff --git a/arch/riscv/lib/riscv_v_helpers.c b/arch/riscv/lib/riscv_v_helpers.c
new file mode 100644 (file)
index 0000000..be38a93
--- /dev/null
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 SiFive
+ * Author: Andy Chiu <[email protected]>
+ */
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+#include <asm/vector.h>
+#include <asm/simd.h>
+
+#ifdef CONFIG_MMU
+#include <asm/asm-prototypes.h>
+#endif
+
+#ifdef CONFIG_MMU
+size_t riscv_v_usercopy_threshold = CONFIG_RISCV_ISA_V_UCOPY_THRESHOLD;
+int __asm_vector_usercopy(void *dst, void *src, size_t n);
+int fallback_scalar_usercopy(void *dst, void *src, size_t n);
+asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n)
+{
+       size_t remain, copied;
+
+       /* skip has_vector() check because it has been done by the asm  */
+       if (!may_use_simd())
+               goto fallback;
+
+       kernel_vector_begin();
+       remain = __asm_vector_usercopy(dst, src, n);
+       kernel_vector_end();
+
+       if (remain) {
+               copied = n - remain;
+               dst += copied;
+               src += copied;
+               n = remain;
+               goto fallback;
+       }
+
+       return remain;
+
+fallback:
+       return fallback_scalar_usercopy(dst, src, n);
+}
+#endif
index 3ab438f30d1328707862134f819e8a74598c6dce..a1e4a3c4292549bfd509cd9ce18b509af50b76ae 100644 (file)
@@ -3,6 +3,8 @@
 #include <asm/asm.h>
 #include <asm/asm-extable.h>
 #include <asm/csr.h>
+#include <asm/hwcap.h>
+#include <asm/alternative-macros.h>
 
        .macro fixup op reg addr lbl
 100:
@@ -11,6 +13,13 @@
        .endm
 
 SYM_FUNC_START(__asm_copy_to_user)
+#ifdef CONFIG_RISCV_ISA_V
+       ALTERNATIVE("j fallback_scalar_usercopy", "nop", 0, RISCV_ISA_EXT_v, CONFIG_RISCV_ISA_V)
+       REG_L   t0, riscv_v_usercopy_threshold
+       bltu    a2, t0, fallback_scalar_usercopy
+       tail enter_vector_usercopy
+#endif
+SYM_FUNC_START(fallback_scalar_usercopy)
 
        /* Enable access to user memory */
        li t6, SR_SUM
@@ -181,6 +190,7 @@ SYM_FUNC_START(__asm_copy_to_user)
        sub a0, t5, a0
        ret
 SYM_FUNC_END(__asm_copy_to_user)
+SYM_FUNC_END(fallback_scalar_usercopy)
 EXPORT_SYMBOL(__asm_copy_to_user)
 SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
 EXPORT_SYMBOL(__asm_copy_from_user)
diff --git a/arch/riscv/lib/uaccess_vector.S b/arch/riscv/lib/uaccess_vector.S
new file mode 100644 (file)
index 0000000..51ab558
--- /dev/null
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/linkage.h>
+#include <asm-generic/export.h>
+#include <asm/asm.h>
+#include <asm/asm-extable.h>
+#include <asm/csr.h>
+
+#define pDst a0
+#define pSrc a1
+#define iNum a2
+
+#define iVL a3
+
+#define ELEM_LMUL_SETTING m8
+#define vData v0
+
+       .macro fixup op reg addr lbl
+100:
+       \op \reg, \addr
+       _asm_extable    100b, \lbl
+       .endm
+
+SYM_FUNC_START(__asm_vector_usercopy)
+       /* Enable access to user memory */
+       li      t6, SR_SUM
+       csrs    CSR_STATUS, t6
+
+loop:
+       vsetvli iVL, iNum, e8, ELEM_LMUL_SETTING, ta, ma
+       fixup vle8.v vData, (pSrc), 10f
+       sub iNum, iNum, iVL
+       add pSrc, pSrc, iVL
+       fixup vse8.v vData, (pDst), 11f
+       add pDst, pDst, iVL
+       bnez iNum, loop
+
+       /* Exception fixup for vector load is shared with normal exit */
+10:
+       /* Disable access to user memory */
+       csrc    CSR_STATUS, t6
+       mv      a0, iNum
+       ret
+
+       /* Exception fixup code for vector store. */
+11:
+       /* Undo the subtraction after vle8.v */
+       add     iNum, iNum, iVL
+       /* Make sure the scalar fallback skip already processed bytes */
+       csrr    t2, CSR_VSTART
+       sub     iNum, iNum, t2
+       j       10b
+SYM_FUNC_END(__asm_vector_usercopy)
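
A note on the byte accounting in the vse8.v fixup above (label 11): with
e8 elements one element is one byte, and CSR_VSTART holds the index of
the element that faulted, so everything before it was already stored.
Using hypothetical names that mirror the register aliases:

	/*
	 * n_left : iNum after "sub iNum, iNum, iVL" (bytes left after this pass)
	 * vl     : iVL                              (bytes attempted this pass)
	 * vstart : CSR_VSTART at the fault          (bytes already stored)
	 */
	size_t remain = (n_left + vl) - vstart;  /* handed to the scalar fallback */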