Git Repo - linux.git/commitdiff
powerpc/code-patching: Use dedicated memory routines for patching
author Benjamin Gray <[email protected]>
Mon, 25 Mar 2024 05:28:15 +0000 (16:28 +1100)
committer Michael Ellerman <[email protected]>
Tue, 7 May 2024 14:35:42 +0000 (00:35 +1000)
The patching page set up as a writable alias may be in quadrant 0
(userspace) if the temporary mm path is used. This causes sanitiser
failures if so. Sanitiser failures also occur on the non-mm path
because the plain memset family is instrumented, and KASAN treats the
patching window as poisoned.

Introduce locally defined patch_* variants of memset that perform an
uninstrumented lower level set, as well as detecting write errors like
the original single patch variant does.

copy_to_user() is not correct here, as the PTE makes it a proper kernel
page (the EAA is privileged access only, RW). It just happens to be in
quadrant 0 because that's the hardware's mechanism for using the current
PID vs PID 0 in translations. Importantly, it's incorrect to allow user
page accesses.

Now that the patching memsets are used, we also propagate a failure up
to the caller as the single patch variant does.

Signed-off-by: Benjamin Gray <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
Link: https://msgid.link/[email protected]
arch/powerpc/lib/code-patching.c

index c6ab46156cda597e01572b1b269ff9125ec0d758..df64343b9214ae504cebe7c00fac36d881676f03 100644 (file)
@@ -372,9 +372,32 @@ int patch_instruction(u32 *addr, ppc_inst_t instr)
 }
 NOKPROBE_SYMBOL(patch_instruction);
 
+static int patch_memset64(u64 *addr, u64 val, size_t count)
+{
+       for (u64 *end = addr + count; addr < end; addr++)
+               __put_kernel_nofault(addr, &val, u64, failed);
+
+       return 0;
+
+failed:
+       return -EPERM;
+}
+
+static int patch_memset32(u32 *addr, u32 val, size_t count)
+{
+       for (u32 *end = addr + count; addr < end; addr++)
+               __put_kernel_nofault(addr, &val, u32, failed);
+
+       return 0;
+
+failed:
+       return -EPERM;
+}
+
 static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool repeat_instr)
 {
        unsigned long start = (unsigned long)patch_addr;
+       int err;
 
        /* Repeat instruction */
        if (repeat_instr) {
@@ -383,19 +406,19 @@ static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool rep
                if (ppc_inst_prefixed(instr)) {
                        u64 val = ppc_inst_as_ulong(instr);
 
-                       memset64((u64 *)patch_addr, val, len / 8);
+                       err = patch_memset64((u64 *)patch_addr, val, len / 8);
                } else {
                        u32 val = ppc_inst_val(instr);
 
-                       memset32(patch_addr, val, len / 4);
+                       err = patch_memset32(patch_addr, val, len / 4);
                }
        } else {
-               memcpy(patch_addr, code, len);
+               err = copy_to_kernel_nofault(patch_addr, code, len);
        }
 
        smp_wmb();      /* smp write barrier */
        flush_icache_range(start, start + len);
-       return 0;
+       return err;
 }
 
 /*
This page took 0.047714 seconds and 4 git commands to generate.