x86/mm/iommu/sva: Make LAM and SVA mutually exclusive
author Kirill A. Shutemov <[email protected]>
Sun, 12 Mar 2023 11:26:06 +0000 (14:26 +0300)
committer Dave Hansen <[email protected]>
Thu, 16 Mar 2023 20:08:40 +0000 (13:08 -0700)
IOMMU and SVA-capable devices know nothing about LAM and only expect
canonical addresses. An attempt to pass down a tagged pointer will lead
to an address translation failure.
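
A rough illustration (not part of this patch; the LAM_U57 tag placement
in bits 62:57 and the mask value are assumed for the example) of why a
device sees a different, non-canonical address than the CPU does:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * LAM_U57 keeps 6 metadata bits at 62:57; the CPU masks them out
	 * before the canonicality check, but an SVA device walking the
	 * same page tables does not.
	 */
	#define LAM_U57_UNTAG_MASK	(~(0x3fULL << 57))

	int main(void)
	{
		uint64_t addr   = 0x00007f1234567000ULL;	/* canonical */
		uint64_t tagged = addr | (0x2aULL << 57);	/* tag = 0x2a */

		printf("CPU translates:        %#llx\n",
		       (unsigned long long)(tagged & LAM_U57_UNTAG_MASK));
		printf("device would fault on: %#llx\n",
		       (unsigned long long)tagged);
		return 0;
	}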

By default, do not allow both LAM and SVA to be used in the same
process.

The new ARCH_FORCE_TAGGED_SVA arch_prctl() overrides the limitation.
By using the arch_prctl(), userspace takes responsibility to never pass
tagged addresses to the device.
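
A minimal userspace sketch (not part of this patch, error handling
trimmed): the prctl codes are the ones from the uapi header touched
below, invoked via raw syscall() since glibc provides no wrapper for
these subcommands.

	#include <sys/syscall.h>
	#include <unistd.h>

	#define ARCH_ENABLE_TAGGED_ADDR	0x4002
	#define ARCH_FORCE_TAGGED_SVA	0x4004

	int main(void)
	{
		/* Promise to never hand tagged pointers to the SVA device. */
		if (syscall(SYS_arch_prctl, ARCH_FORCE_TAGGED_SVA, 0))
			return 1;

		/* LAM_U57: ask for 6 tag bits; allowed even with a PASID bound. */
		if (syscall(SYS_arch_prctl, ARCH_ENABLE_TAGGED_ADDR, 6))
			return 1;

		return 0;
	}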

Signed-off-by: Kirill A. Shutemov <[email protected]>
Signed-off-by: Dave Hansen <[email protected]>
Reviewed-by: Ashok Raj <[email protected]>
Reviewed-by: Jacob Pan <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lore.kernel.org/all/20230312112612.31869-12-kirill.shutemov%40linux.intel.com
arch/x86/include/asm/mmu.h
arch/x86/include/asm/mmu_context.h
arch/x86/include/uapi/asm/prctl.h
arch/x86/kernel/process_64.c
drivers/iommu/iommu-sva.c
include/linux/mmu_context.h

diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index e80762e998ce948b87334b62892369a284beed45..0da5c227f490c07bcff20c877ba4ea6b77129c91 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -14,6 +14,8 @@
 #define MM_CONTEXT_HAS_VSYSCALL                1
 /* Do not allow changing LAM mode */
 #define MM_CONTEXT_LOCK_LAM            2
+/* Allow LAM and SVA coexisting */
+#define MM_CONTEXT_FORCE_TAGGED_SVA    3
 
 /*
  * x86 has arch-specific MMU state beyond what lives in mm_struct.
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 06eaaf75d57213617022024d43b244efe5747419..4c396e9a384f6eb2025e2c3b2d599c49e4db72be 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -115,6 +115,12 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
        mm->context.untag_mask = -1UL;
 }
 
+#define arch_pgtable_dma_compat arch_pgtable_dma_compat
+static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
+{
+       return !mm_lam_cr3_mask(mm) ||
+               test_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &mm->context.flags);
+}
 #else
 
 static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
diff --git a/arch/x86/include/uapi/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h
index a31e27b95b194c0a2a2ab59c3c0c2d626ecb06a2..eb290d89cb3231ec0ee0b0f0d1116a8068cd7939 100644
--- a/arch/x86/include/uapi/asm/prctl.h
+++ b/arch/x86/include/uapi/asm/prctl.h
@@ -23,5 +23,6 @@
 #define ARCH_GET_UNTAG_MASK            0x4001
 #define ARCH_ENABLE_TAGGED_ADDR                0x4002
 #define ARCH_GET_MAX_TAG_BITS          0x4003
+#define ARCH_FORCE_TAGGED_SVA          0x4004
 
 #endif /* _ASM_X86_PRCTL_H */
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 186f34add8658451662c717ed0bae0c8efc09a5d..b46924c9e46d9356e6bcaef2675c7d5ffbea85ca 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -756,6 +756,10 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
        if (current->mm != mm)
                return -EINVAL;
 
+       if (mm_valid_pasid(mm) &&
+           !test_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &mm->context.flags))
+               return -EINTR;
+
        if (mmap_write_lock_killable(mm))
                return -EINTR;
 
@@ -878,6 +882,9 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
                                (unsigned long __user *)arg2);
        case ARCH_ENABLE_TAGGED_ADDR:
                return prctl_enable_tagged_addr(task->mm, arg2);
+       case ARCH_FORCE_TAGGED_SVA:
+               set_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &task->mm->context.flags);
+               return 0;
        case ARCH_GET_MAX_TAG_BITS:
                if (!cpu_feature_enabled(X86_FEATURE_LAM))
                        return put_user(0, (unsigned long __user *)arg2);
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index 4ee2929f0d7a0c5e4cd3128bfa4c93c8d8f70e66..dd76a1a09cf708ce7f92ed8396531e0d20ec89d2 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -2,6 +2,7 @@
 /*
  * Helpers for IOMMU drivers implementing SVA
  */
+#include <linux/mmu_context.h>
 #include <linux/mutex.h>
 #include <linux/sched/mm.h>
 #include <linux/iommu.h>
@@ -32,6 +33,9 @@ int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
            min == 0 || max < min)
                return -EINVAL;
 
+       if (!arch_pgtable_dma_compat(mm))
+               return -EBUSY;
+
        mutex_lock(&iommu_sva_lock);
        /* Is a PASID already associated with this mm? */
        if (mm_valid_pasid(mm)) {
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index 14b9c1fa05c49f27d5843ca415e594c832c7f979..f2b7a3f040999e8a1506306d0c30800a2a7d3ec2 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -35,4 +35,11 @@ static inline unsigned long mm_untag_mask(struct mm_struct *mm)
 }
 #endif
 
+#ifndef arch_pgtable_dma_compat
+static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
+{
+       return true;
+}
+#endif
+
 #endif