ARM: 9429/1: ioremap: Sync PGDs for VMALLOC shadow
author Linus Walleij <[email protected]>
Wed, 23 Oct 2024 12:03:14 +0000 (13:03 +0100)
committer Russell King (Oracle) <[email protected]>
Wed, 13 Nov 2024 08:15:22 +0000 (08:15 +0000)
When syncing the VMALLOC area to other CPUs, make sure to also
sync the KASAN shadow memory for the VMALLOC area, so that we
don't get stale entries for the shadow memory in the top-level PGD.

Since we are now copying PGDs in two places, create a helper
function named memcpy_pgd() to do the actual copying, and
create a helper to map the addresses of VMALLOC_START and
VMALLOC_END into the corresponding shadow memory.

Co-developed-by: Melon Liu <[email protected]>
Cc: [email protected]
Fixes: 565cbaad83d8 ("ARM: 9202/1: kasan: support CONFIG_KASAN_VMALLOC")
Link: https://lore.kernel.org/linux-arm-kernel/[email protected]/
Reported-by: Clement LE GOFFIC <[email protected]>
Suggested-by: Mark Rutland <[email protected]>
Suggested-by: Russell King (Oracle) <[email protected]>
Acked-by: Mark Rutland <[email protected]>
Signed-off-by: Linus Walleij <[email protected]>
Signed-off-by: Russell King (Oracle) <[email protected]>
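
For background, the reason the shadow range needs its own PGD sync can be
seen from the shape of KASAN's address translation: the shadow of
[VMALLOC_START, VMALLOC_END) is itself a contiguous virtual range, covered
by its own set of top-level entries. Below is a minimal user-space sketch
of that relationship; the constants and bounds are illustrative
placeholders, not the real ARM values, and mem_to_shadow() only mirrors
the scale-and-offset shape of the kernel's kasan_mem_to_shadow().

    /* Standalone sketch, not kernel code: placeholder constants only. */
    #include <stdio.h>

    #define KASAN_SHADOW_SCALE_SHIFT 3            /* 1 shadow byte covers 8 bytes */
    #define KASAN_SHADOW_OFFSET      0xB0000000UL /* placeholder shadow offset */
    #define PGDIR_SHIFT              21           /* placeholder: one PGD entry per 2 MiB */

    /* Same shape as kasan_mem_to_shadow(): scale the address down, add an offset. */
    static unsigned long mem_to_shadow(unsigned long addr)
    {
            return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
    }

    static unsigned long pgd_index(unsigned long addr)
    {
            return addr >> PGDIR_SHIFT;
    }

    int main(void)
    {
            unsigned long vmalloc_start = 0xC8000000UL; /* placeholder bounds */
            unsigned long vmalloc_end   = 0xF0000000UL;

            unsigned long shadow_start = mem_to_shadow(vmalloc_start);
            unsigned long shadow_end   = mem_to_shadow(vmalloc_end);

            /*
             * The vmalloc range and its shadow land on separate sets of
             * top-level entries; syncing only the first range leaves the
             * shadow's PGD slots stale.
             */
            printf("vmalloc PGD entries: %lu..%lu\n",
                   pgd_index(vmalloc_start), pgd_index(vmalloc_end));
            printf("shadow  PGD entries: %lu..%lu\n",
                   pgd_index(shadow_start), pgd_index(shadow_end));
            return 0;
    }

With the real kernel layout the two ranges occupy different PGD slots,
which is why __check_vmalloc_seq() in the patch below calls memcpy_pgd()
once for the vmalloc area and once for its shadow.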
arch/arm/mm/ioremap.c

index 794cfea9f9d4c894d906d3032cac50cbbf0161ef..ff555823cceb86e105fcb25db510d93ecf2cf41e 100644 (file)
@@ -23,6 +23,7 @@
  */
 #include <linux/module.h>
 #include <linux/errno.h>
+#include <linux/kasan.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <linux/io.h>
@@ -115,16 +116,40 @@ int ioremap_page(unsigned long virt, unsigned long phys,
 }
 EXPORT_SYMBOL(ioremap_page);
 
+#ifdef CONFIG_KASAN
+static unsigned long arm_kasan_mem_to_shadow(unsigned long addr)
+{
+       return (unsigned long)kasan_mem_to_shadow((void *)addr);
+}
+#else
+static unsigned long arm_kasan_mem_to_shadow(unsigned long addr)
+{
+       return 0;
+}
+#endif
+
+static void memcpy_pgd(struct mm_struct *mm, unsigned long start,
+                      unsigned long end)
+{
+       end = ALIGN(end, PGDIR_SIZE);
+       memcpy(pgd_offset(mm, start), pgd_offset_k(start),
+              sizeof(pgd_t) * (pgd_index(end) - pgd_index(start)));
+}
+
 void __check_vmalloc_seq(struct mm_struct *mm)
 {
        int seq;
 
        do {
                seq = atomic_read(&init_mm.context.vmalloc_seq);
-               memcpy(pgd_offset(mm, VMALLOC_START),
-                      pgd_offset_k(VMALLOC_START),
-                      sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
-                                       pgd_index(VMALLOC_START)));
+               memcpy_pgd(mm, VMALLOC_START, VMALLOC_END);
+               if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+                       unsigned long start =
+                               arm_kasan_mem_to_shadow(VMALLOC_START);
+                       unsigned long end =
+                               arm_kasan_mem_to_shadow(VMALLOC_END);
+                       memcpy_pgd(mm, start, end);
+               }
                /*
                 * Use a store-release so that other CPUs that observe the
                 * counter's new value are guaranteed to see the results of the