Git Repo - linux.git/commitdiff
x86/uaccess: Provide untagged_addr() and remove tags before address check
author: Kirill A. Shutemov <[email protected]>
Sun, 12 Mar 2023 11:26:01 +0000 (14:26 +0300)
committer: Dave Hansen <[email protected]>
Thu, 16 Mar 2023 20:08:39 +0000 (13:08 -0700)
untagged_addr() is a helper used by the core-mm to strip tag bits and
get the address to the canonical shape based on rules of the current
thread. It only handles userspace addresses.

The untagging mask is stored in a per-CPU variable and is set on context
switch to the task.

The tags must not be included in the check of whether it is okay to access
the userspace address. Strip the tags in access_ok().

Signed-off-by: Kirill A. Shutemov <[email protected]>
Signed-off-by: Dave Hansen <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Tested-by: Alexander Potapenko <[email protected]>
Link: https://lore.kernel.org/all/20230312112612.31869-7-kirill.shutemov%40linux.intel.com
arch/x86/include/asm/mmu.h
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/uaccess.h
arch/x86/kernel/process.c
arch/x86/mm/init.c

index 22fc9fbf1d0a2899d1f7e9e33e95a79339c25d23..9cac8c45a64702aea72371932b2900300774bdd5 100644 (file)
@@ -45,6 +45,9 @@ typedef struct {
 #ifdef CONFIG_ADDRESS_MASKING
        /* Active LAM mode:  X86_CR3_LAM_U48 or X86_CR3_LAM_U57 or 0 (disabled) */
        unsigned long lam_cr3_mask;
+
+       /* Significant bits of the virtual address. Excludes tag bits. */
+       u64 untag_mask;
 #endif
 
        struct mutex lock;
index 0295c3863db73b0ab52d4328de066ab0bc29bda3..eb1387ac40fa9ee7fac6ea51f3cd97c7c6af5439 100644 (file)
@@ -101,6 +101,12 @@ static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
 static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
 {
        mm->context.lam_cr3_mask = oldmm->context.lam_cr3_mask;
+       mm->context.untag_mask = oldmm->context.untag_mask;
+}
+
+static inline void mm_reset_untag_mask(struct mm_struct *mm)
+{
+       mm->context.untag_mask = -1UL;
 }
 
 #else
@@ -113,6 +119,10 @@ static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
 static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
 {
 }
+
+static inline void mm_reset_untag_mask(struct mm_struct *mm)
+{
+}
 #endif
 
 #define enter_lazy_tlb enter_lazy_tlb
@@ -139,6 +149,7 @@ static inline int init_new_context(struct task_struct *tsk,
                mm->context.execute_only_pkey = -1;
        }
 #endif
+       mm_reset_untag_mask(mm);
        init_new_context_ldt(mm);
        return 0;
 }
index e8b47f57bd4afa8f91396ad85090b839af82850a..75bfaa421030328b72542ed803e45dd401fbf210 100644 (file)
@@ -54,6 +54,15 @@ static inline void cr4_clear_bits(unsigned long mask)
        local_irq_restore(flags);
 }
 
+#ifdef CONFIG_ADDRESS_MASKING
+DECLARE_PER_CPU(u64, tlbstate_untag_mask);
+
+static inline u64 current_untag_mask(void)
+{
+       return this_cpu_read(tlbstate_untag_mask);
+}
+#endif
+
 #ifndef MODULE
 /*
  * 6 because 6 should be plenty and struct tlb_state will fit in two cache
@@ -380,6 +389,7 @@ static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
 {
        this_cpu_write(cpu_tlbstate.lam,
                       mm->context.lam_cr3_mask >> X86_CR3_LAM_U57_BIT);
+       this_cpu_write(tlbstate_untag_mask, mm->context.untag_mask);
 }
 
 #else
index 1cc756eafa4478393e28b121e5ee6dc3c023c50c..c79ebdbd63562cd9b0d015c16a6f2ab4fa5839e2 100644 (file)
@@ -7,11 +7,13 @@
 #include <linux/compiler.h>
 #include <linux/instrumented.h>
 #include <linux/kasan-checks.h>
+#include <linux/mm_types.h>
 #include <linux/string.h>
 #include <asm/asm.h>
 #include <asm/page.h>
 #include <asm/smap.h>
 #include <asm/extable.h>
+#include <asm/tlbflush.h>
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 static inline bool pagefault_disabled(void);
@@ -21,6 +23,39 @@ static inline bool pagefault_disabled(void);
 # define WARN_ON_IN_IRQ()
 #endif
 
+#ifdef CONFIG_ADDRESS_MASKING
+/*
+ * Mask out tag bits from the address.
+ *
+ * Magic with the 'sign' allows to untag userspace pointer without any branches
+ * while leaving kernel addresses intact.
+ */
+static inline unsigned long __untagged_addr(unsigned long addr,
+                                           unsigned long mask)
+{
+       long sign = addr >> 63;
+
+       addr &= mask | sign;
+       return addr;
+}
+
+#define untagged_addr(addr)    ({                                      \
+       u64 __addr = (__force u64)(addr);                               \
+       __addr = __untagged_addr(__addr, current_untag_mask());         \
+       (__force __typeof__(addr))__addr;                               \
+})
+
+#define untagged_addr_remote(mm, addr) ({                              \
+       u64 __addr = (__force u64)(addr);                               \
+       mmap_assert_locked(mm);                                         \
+       __addr = __untagged_addr(__addr, (mm)->context.untag_mask);     \
+       (__force __typeof__(addr))__addr;                               \
+})
+
+#else
+#define untagged_addr(addr)    (addr)
+#endif
+
 /**
  * access_ok - Checks if a user space pointer is valid
  * @addr: User space pointer to start of block to check
@@ -38,10 +73,10 @@ static inline bool pagefault_disabled(void);
  * Return: true (nonzero) if the memory block may be valid, false (zero)
  * if it is definitely invalid.
  */
-#define access_ok(addr, size)                                  \
+#define access_ok(addr, size)                                          \
 ({                                                                     \
        WARN_ON_IN_IRQ();                                               \
-       likely(__access_ok(addr, size));                                \
+       likely(__access_ok(untagged_addr(addr), size));                 \
 })
 
 #include <asm-generic/access_ok.h>
index b650cde3f64db5592a394d3e827fc897467eabcd..bbc8c4c6e36099471aa0b2136141a8b09901de3b 100644 (file)
@@ -48,6 +48,7 @@
 #include <asm/frame.h>
 #include <asm/unwind.h>
 #include <asm/tdx.h>
+#include <asm/mmu_context.h>
 
 #include "process.h"
 
@@ -368,6 +369,8 @@ void arch_setup_new_exec(void)
                task_clear_spec_ssb_noexec(current);
                speculation_ctrl_update(read_thread_flags());
        }
+
+       mm_reset_untag_mask(current->mm);
 }
 
 #ifdef CONFIG_X86_IOPL_IOPERM
index cb258f58fdc87935500c28053eef87ce1b02e206..659b6c0f7910d5a7b1d52b791e3289e7c8f6bca7 100644 (file)
@@ -1048,6 +1048,11 @@ __visible DEFINE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate) = {
        .cr4 = ~0UL,    /* fail hard if we screw up cr4 shadow initialization */
 };
 
+#ifdef CONFIG_ADDRESS_MASKING
+DEFINE_PER_CPU(u64, tlbstate_untag_mask);
+EXPORT_PER_CPU_SYMBOL(tlbstate_untag_mask);
+#endif
+
 void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
 {
        /* entry 0 MUST be WB (hardwired to speed up translations) */
This page took 0.09825 seconds and 4 git commands to generate.