Git Repo - linux.git/commitdiff
arm64: start using 'asm goto' for get_user() when available
author: Linus Torvalds <[email protected]>
Sun, 9 Jun 2024 17:11:04 +0000 (10:11 -0700)
committer: Linus Torvalds <[email protected]>
Wed, 19 Jun 2024 19:33:38 +0000 (12:33 -0700)
This generates noticeably better code with compilers that support it,
since we don't need to test the error register etc, the exception just
jumps to the error handling directly.

Note that this also marks SW_TTBR0_PAN incompatible with KCSAN support,
since KCSAN wants to save and restore the user access state.

KCSAN and SW_TTBR0_PAN were probably always incompatible, but it became
obvious only when implementing the unsafe user access functions.  At
that point the default empty user_access_save/restore() functions
weren't provided by the default fallback functions.

Signed-off-by: Linus Torvalds <[email protected]>
arch/arm64/Kconfig
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/mte.c

index 5d91259ee7b53a36f32b84512706c9f9a09d6620..b6e8920364de22986378f3c64fdd3f4f87cf0643 100644 (file)
@@ -1649,6 +1649,7 @@ config RODATA_FULL_DEFAULT_ENABLED
 
 config ARM64_SW_TTBR0_PAN
        bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
+       depends on !KCSAN
        help
          Enabling this option prevents the kernel from accessing
          user-space memory directly by pointing TTBR0_EL1 to a reserved
index 14be5000c5a0cec1a65503137dba929da314077b..3e721f73bcaf44624e82be27a684d168f604fd3d 100644 (file)
@@ -184,29 +184,40 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
  * The "__xxx_error" versions set the third argument to -EFAULT if an error
  * occurs, and leave it unchanged on success.
  */
-#define __get_mem_asm(load, reg, x, addr, err, type)                   \
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+#define __get_mem_asm(load, reg, x, addr, label, type)                 \
+       asm_goto_output(                                                \
+       "1:     " load "        " reg "0, [%1]\n"                       \
+       _ASM_EXTABLE_##type##ACCESS_ERR(1b, %l2, %w0)                   \
+       : "=r" (x)                                                      \
+       : "r" (addr) : : label)
+#else
+#define __get_mem_asm(load, reg, x, addr, label, type) do {            \
+       int __gma_err = 0;                                              \
        asm volatile(                                                   \
        "1:     " load "        " reg "1, [%2]\n"                       \
        "2:\n"                                                          \
        _ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1)          \
-       : "+r" (err), "=r" (x)                                          \
-       : "r" (addr))
+       : "+r" (__gma_err), "=r" (x)                                    \
+       : "r" (addr));                                                  \
+       if (__gma_err) goto label; } while (0)
+#endif
 
-#define __raw_get_mem(ldr, x, ptr, err, type)                                  \
+#define __raw_get_mem(ldr, x, ptr, label, type)                                        \
 do {                                                                           \
        unsigned long __gu_val;                                                 \
        switch (sizeof(*(ptr))) {                                               \
        case 1:                                                                 \
-               __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err), type);     \
+               __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), label, type);     \
                break;                                                          \
        case 2:                                                                 \
-               __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err), type);     \
+               __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), label, type);     \
                break;                                                          \
        case 4:                                                                 \
-               __get_mem_asm(ldr, "%w", __gu_val, (ptr), (err), type);         \
+               __get_mem_asm(ldr, "%w", __gu_val, (ptr), label, type);         \
                break;                                                          \
        case 8:                                                                 \
-               __get_mem_asm(ldr, "%x",  __gu_val, (ptr), (err), type);        \
+               __get_mem_asm(ldr, "%x",  __gu_val, (ptr), label, type);        \
                break;                                                          \
        default:                                                                \
                BUILD_BUG();                                                    \
@@ -219,27 +230,34 @@ do {                                                                              \
  * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
  * we must evaluate these outside of the critical section.
  */
-#define __raw_get_user(x, ptr, err)                                    \
+#define __raw_get_user(x, ptr, label)                                  \
 do {                                                                   \
        __typeof__(*(ptr)) __user *__rgu_ptr = (ptr);                   \
        __typeof__(x) __rgu_val;                                        \
        __chk_user_ptr(ptr);                                            \
-                                                                       \
-       uaccess_ttbr0_enable();                                         \
-       __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err, U);            \
-       uaccess_ttbr0_disable();                                        \
-                                                                       \
-       (x) = __rgu_val;                                                \
+       do {                                                            \
+               __label__ __rgu_failed;                                 \
+               uaccess_ttbr0_enable();                                 \
+               __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, __rgu_failed, U);   \
+               uaccess_ttbr0_disable();                                \
+               (x) = __rgu_val;                                        \
+               break;                                                  \
+       __rgu_failed:                                                   \
+               uaccess_ttbr0_disable();                                \
+               goto label;                                             \
+       } while (0);                                                    \
 } while (0)
 
 #define __get_user_error(x, ptr, err)                                  \
 do {                                                                   \
+       __label__ __gu_failed;                                          \
        __typeof__(*(ptr)) __user *__p = (ptr);                         \
        might_fault();                                                  \
        if (access_ok(__p, sizeof(*__p))) {                             \
                __p = uaccess_mask_ptr(__p);                            \
-               __raw_get_user((x), __p, (err));                        \
+               __raw_get_user((x), __p, __gu_failed);                  \
        } else {                                                        \
+       __gu_failed:                                                    \
                (x) = (__force __typeof__(x))0; (err) = -EFAULT;        \
        }                                                               \
 } while (0)
@@ -262,15 +280,18 @@ do {                                                                      \
 do {                                                                   \
        __typeof__(dst) __gkn_dst = (dst);                              \
        __typeof__(src) __gkn_src = (src);                              \
-       int __gkn_err = 0;                                              \
-                                                                       \
-       __mte_enable_tco_async();                                       \
-       __raw_get_mem("ldr", *((type *)(__gkn_dst)),                    \
-                     (__force type *)(__gkn_src), __gkn_err, K);       \
-       __mte_disable_tco_async();                                      \
+       do {                                                            \
+               __label__ __gkn_label;                                  \
                                                                        \
-       if (unlikely(__gkn_err))                                        \
+               __mte_enable_tco_async();                               \
+               __raw_get_mem("ldr", *((type *)(__gkn_dst)),            \
+                     (__force type *)(__gkn_src), __gkn_label, K);     \
+               __mte_disable_tco_async();                              \
+               break;                                                  \
+       __gkn_label:                                                    \
+               __mte_disable_tco_async();                              \
                goto err_label;                                         \
+       } while (0);                                                    \
 } while (0)
 
 #define __put_mem_asm(store, reg, x, addr, err, type)                  \
@@ -381,6 +402,60 @@ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const voi
        __actu_ret;                                                     \
 })
 
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
+{
+       if (unlikely(!access_ok(ptr,len)))
+               return 0;
+       uaccess_ttbr0_enable();
+       return 1;
+}
+#define user_access_begin(a,b) user_access_begin(a,b)
+#define user_access_end()      uaccess_ttbr0_disable()
+
+/*
+ * The arm64 inline asms should learn abut asm goto, and we should
+ * teach user_access_begin() about address masking.
+ */
+#define unsafe_put_user(x, ptr, label) do {                            \
+       int __upu_err = 0;                                              \
+       __raw_put_mem("sttr", x, uaccess_mask_ptr(ptr), __upu_err, U);  \
+       if (__upu_err) goto label;                              \
+} while (0)
+
+#define unsafe_get_user(x, ptr, label) \
+       __raw_get_mem("ldtr", x, uaccess_mask_ptr(ptr), label, U)
+
+/*
+ * KCSAN uses these to save and restore ttbr state.
+ * We do not support KCSAN with ARM64_SW_TTBR0_PAN, so
+ * they are no-ops.
+ */
+static inline unsigned long user_access_save(void) { return 0; }
+static inline void user_access_restore(unsigned long enabled) { }
+
+/*
+ * We want the unsafe accessors to always be inlined and use
+ * the error labels - thus the macro games.
+ */
+#define unsafe_copy_loop(dst, src, len, type, label)                           \
+       while (len >= sizeof(type)) {                                           \
+               unsafe_put_user(*(type *)(src),(type __user *)(dst),label);     \
+               dst += sizeof(type);                                            \
+               src += sizeof(type);                                            \
+               len -= sizeof(type);                                            \
+       }
+
+#define unsafe_copy_to_user(_dst,_src,_len,label)                      \
+do {                                                                   \
+       char __user *__ucu_dst = (_dst);                                \
+       const char *__ucu_src = (_src);                                 \
+       size_t __ucu_len = (_len);                                      \
+       unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);  \
+       unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);  \
+       unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);  \
+       unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);   \
+} while (0)
+
 #define INLINE_COPY_TO_USER
 #define INLINE_COPY_FROM_USER
 
index dcdcccd40891c638951be93b6f5f2884502a2f7c..6174671be7c18d5b71f0fbf4aee467b1235080a0 100644 (file)
@@ -582,12 +582,9 @@ subsys_initcall(register_mte_tcf_preferred_sysctl);
 size_t mte_probe_user_range(const char __user *uaddr, size_t size)
 {
        const char __user *end = uaddr + size;
-       int err = 0;
        char val;
 
-       __raw_get_user(val, uaddr, err);
-       if (err)
-               return size;
+       __raw_get_user(val, uaddr, efault);
 
        uaddr = PTR_ALIGN(uaddr, MTE_GRANULE_SIZE);
        while (uaddr < end) {
@@ -595,12 +592,13 @@ size_t mte_probe_user_range(const char __user *uaddr, size_t size)
                 * A read is sufficient for mte, the caller should have probed
                 * for the pte write permission if required.
                 */
-               __raw_get_user(val, uaddr, err);
-               if (err)
-                       return end - uaddr;
+               __raw_get_user(val, uaddr, efault);
                uaddr += MTE_GRANULE_SIZE;
        }
        (void)val;
 
        return 0;
+
+efault:
+       return end - uaddr;
 }
This page took 0.073582 seconds and 4 git commands to generate.