diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index b00112d7ad467d30712d168538a418bac4d5c60c..c6ab46156cda597e01572b1b269ff9125ec0d758 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -38,6 +38,7 @@ static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr
        return 0;
 
 failed:
+       mb();  /* sync */
        return -EPERM;
 }
 
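The added mb() mirrors the success path, where __patch_instruction() already orders
the store as part of its flush sequence. Paraphrasing the success path from earlier
in this file (the sync between dcbst and icbi is the hwsync the failure path lacked):

	/* flush the patched data cache line, then invalidate the icache line */
	asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" ::
	     "r" (patch_addr), "r" (exec_addr));

With the failure path now performing the sync itself, the explicit barrier in the
error path of __do_patch_instruction_mm() becomes redundant, hence its removal in
a later hunk.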
@@ -204,9 +205,6 @@ void __init poking_init(void)
 {
        int ret;
 
-       if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
-               return;
-
        if (mm_patch_enabled())
                ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                        "powerpc/text_poke_mm:online",
@@ -309,10 +307,6 @@ static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
 
        err = __patch_instruction(addr, instr, patch_addr);
 
-       /* hwsync performed by __patch_instruction (sync) if successful */
-       if (err)
-               mb();  /* sync */
-
        /* context synchronisation performed by __patch_instruction (isync or exception) */
        stop_using_temp_mm(patching_mm, orig_mm);
 
@@ -378,6 +372,144 @@ int patch_instruction(u32 *addr, ppc_inst_t instr)
 }
 NOKPROBE_SYMBOL(patch_instruction);
 
+static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool repeat_instr)
+{
+       unsigned long start = (unsigned long)patch_addr;
+
+       /* Repeat a single instruction to fill the whole range */
+       if (repeat_instr) {
+               ppc_inst_t instr = ppc_inst_read(code);
+
+               if (ppc_inst_prefixed(instr)) {
+                       u64 val = ppc_inst_as_ulong(instr);
+
+                       memset64((u64 *)patch_addr, val, len / 8);
+               } else {
+                       u32 val = ppc_inst_val(instr);
+
+                       memset32(patch_addr, val, len / 4);
+               }
+       } else {
+               memcpy(patch_addr, code, len);
+       }
+
+       smp_wmb();      /* order the copied instructions before the icache flush */
+       flush_icache_range(start, start + len);
+       return 0;
+}
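A quick illustration of the fill arithmetic above (hypothetical values, not part of
the patch): memset32() takes a count of 32-bit words and memset64() a count of
64-bit doublewords, so 'len' is divided by the instruction size.

	/* hypothetical: fill 16 bytes with the 4-byte trap insn (tw 31,0,0) */
	u32 trap = 0x7fe00008;
	memset32(patch_addr, trap, 16 / 4);			/* four copies */

	/* hypothetical: fill 16 bytes with one 8-byte prefixed instruction */
	memset64((u64 *)patch_addr, prefixed_val, 16 / 8);	/* two copies */

This is why a repeated prefixed instruction needs 'len' to be a multiple of 8,
and a word instruction a multiple of 4.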
+
+/*
+ * A page is mapped and the instructions that fit that page are patched.
+ * Assumes 'len' is at most (PAGE_SIZE - offset_in_page(addr)).
+ */
+static int __do_patch_instructions_mm(u32 *addr, u32 *code, size_t len, bool repeat_instr)
+{
+       struct mm_struct *patching_mm, *orig_mm;
+       unsigned long pfn = get_patch_pfn(addr);
+       unsigned long text_poke_addr;
+       spinlock_t *ptl;
+       u32 *patch_addr;
+       pte_t *pte;
+       int err;
+
+       patching_mm = __this_cpu_read(cpu_patching_context.mm);
+       text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
+       patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
+
+       pte = get_locked_pte(patching_mm, text_poke_addr, &ptl);
+       if (!pte)
+               return -ENOMEM;
+
+       __set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
+
+       /* order PTE update before use, also serves as the hwsync */
+       asm volatile("ptesync" ::: "memory");
+
+       /* order context switch after arbitrary prior code */
+       isync();
+
+       orig_mm = start_using_temp_mm(patching_mm);
+
+       err = __patch_instructions(patch_addr, code, len, repeat_instr);
+
+       /* context synchronisation performed by __patch_instructions */
+       stop_using_temp_mm(patching_mm, orig_mm);
+
+       pte_clear(patching_mm, text_poke_addr, pte);
+       /*
+        * ptesync to order PTE update before TLB invalidation done
+        * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
+        */
+       local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);
+
+       pte_unmap_unlock(pte, ptl);
+
+       return err;
+}
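Both patching paths read their mapping state from the per-CPU context that
poking_init() sets up. For reference, paraphrased from earlier in this file:

	struct patch_context {
		union {
			struct vm_struct *area;	/* vmalloc-based path */
			struct mm_struct *mm;	/* temp-mm path */
		};
		unsigned long addr;
		pte_t *pte;
	};

	static DEFINE_PER_CPU(struct patch_context, cpu_patching_context);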
+
+/*
+ * A page is mapped and the instructions that fit that page are patched.
+ * Assumes 'len' is at most (PAGE_SIZE - offset_in_page(addr)).
+ */
+static int __do_patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
+{
+       unsigned long pfn = get_patch_pfn(addr);
+       unsigned long text_poke_addr;
+       u32 *patch_addr;
+       pte_t *pte;
+       int err;
+
+       text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
+       patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
+
+       pte = __this_cpu_read(cpu_patching_context.pte);
+       __set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
+       /* See ptesync comment in radix__set_pte_at() */
+       if (radix_enabled())
+               asm volatile("ptesync" ::: "memory");
+
+       err = __patch_instructions(patch_addr, code, len, repeat_instr);
+
+       pte_clear(&init_mm, text_poke_addr, pte);
+       flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
+
+       return err;
+}
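Which of the two helpers runs is selected by mm_patch_enabled() in
patch_instructions() below; paraphrased from earlier in this file, the
temporary-mm path is only taken on SMP radix systems:

	static bool mm_patch_enabled(void)
	{
		return IS_ENABLED(CONFIG_SMP) && radix_enabled();
	}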
+
+/*
+ * Patch 'addr' with 'len' bytes of instructions from 'code'.
+ *
+ * If 'repeat_instr' is true, the single instruction in 'code' is
+ * repeated to fill 'len' bytes.
+ */
+int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
+{
+       while (len > 0) {
+               unsigned long flags;
+               size_t plen;
+               int err;
+
+               plen = min_t(size_t, PAGE_SIZE - offset_in_page(addr), len);
+
+               local_irq_save(flags);
+               if (mm_patch_enabled())
+                       err = __do_patch_instructions_mm(addr, code, plen, repeat_instr);
+               else
+                       err = __do_patch_instructions(addr, code, plen, repeat_instr);
+               local_irq_restore(flags);
+               if (err)
+                       return err;
+
+               len -= plen;
+               addr = (u32 *)((unsigned long)addr + plen);
+               if (!repeat_instr)
+                       code = (u32 *)((unsigned long)code + plen);
+       }
+
+       return 0;
+}
+NOKPROBE_SYMBOL(patch_instructions);
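A hedged usage sketch (hypothetical caller, not part of this patch): a JIT can
copy generated code into a write-protected region, or poison a region by
repeating a trap instruction across it.

	/* hypothetical: copy 'len' bytes of generated code into an RX region */
	err = patch_instructions(dst, src, len, false);

	/* hypothetical: invalidate a region by repeating a 4-byte trap */
	u32 insn = PPC_RAW_TRAP();
	err = patch_instructions(dst, &insn, len, true);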
+
 int patch_branch(u32 *addr, unsigned long target, int flags)
 {
        ppc_inst_t instr;