return __patch_instruction(addr, instr, addr);
}
-#ifdef CONFIG_STRICT_KERNEL_RWX
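+/*
+ * Per-CPU context for text patching: either a vm_struct poke area or a
+ * dedicated patching mm, plus the patching address and (for the vm_struct
+ * case) the kernel PTE that maps it.
+ */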
+struct patch_context {
+ union {
+ struct vm_struct *area;
+ struct mm_struct *mm;
+ };
+ unsigned long addr;
+ pte_t *pte;
+};
-static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
-static DEFINE_PER_CPU(struct mm_struct *, cpu_patching_mm);
-static DEFINE_PER_CPU(unsigned long, cpu_patching_addr);
-static DEFINE_PER_CPU(pte_t *, cpu_patching_pte);
+static DEFINE_PER_CPU(struct patch_context, cpu_patching_context);
static int map_patch_area(void *addr, unsigned long text_poke_addr);
static void unmap_patch_area(unsigned long addr);
unmap_patch_area(addr);
- this_cpu_write(text_poke_area, area);
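+ /* Cache this CPU's poke area, its address and the kernel PTE mapping it */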
+ this_cpu_write(cpu_patching_context.area, area);
+ this_cpu_write(cpu_patching_context.addr, addr);
+ this_cpu_write(cpu_patching_context.pte, virt_to_kpte(addr));
return 0;
}
static int text_area_cpu_down(unsigned int cpu)
{
- free_vm_area(this_cpu_read(text_poke_area));
+ free_vm_area(this_cpu_read(cpu_patching_context.area));
+ this_cpu_write(cpu_patching_context.area, NULL);
+ this_cpu_write(cpu_patching_context.addr, 0);
+ this_cpu_write(cpu_patching_context.pte, NULL);
return 0;
}
goto fail_no_pte;
pte_unmap_unlock(pte, ptl);
- this_cpu_write(cpu_patching_mm, mm);
- this_cpu_write(cpu_patching_addr, addr);
- this_cpu_write(cpu_patching_pte, pte);
+ this_cpu_write(cpu_patching_context.mm, mm);
+ this_cpu_write(cpu_patching_context.addr, addr);
return 0;
static int text_area_cpu_down_mm(unsigned int cpu)
{
- put_patching_mm(this_cpu_read(cpu_patching_mm),
- this_cpu_read(cpu_patching_addr));
+ put_patching_mm(this_cpu_read(cpu_patching_context.mm),
+ this_cpu_read(cpu_patching_context.addr));
- this_cpu_write(cpu_patching_mm, NULL);
- this_cpu_write(cpu_patching_addr, 0);
- this_cpu_write(cpu_patching_pte, NULL);
+ this_cpu_write(cpu_patching_context.mm, NULL);
+ this_cpu_write(cpu_patching_context.addr, 0);
return 0;
}
{
int ret;
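+ /* Nothing to set up when strict kernel RWX is off; patching writes text directly */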
+ if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
+ return;
+
if (mm_patch_enabled())
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"powerpc/text_poke_mm:online",
unsigned long pfn = get_patch_pfn(addr);
struct mm_struct *patching_mm;
struct mm_struct *orig_mm;
+ spinlock_t *ptl;
- patching_mm = __this_cpu_read(cpu_patching_mm);
- pte = __this_cpu_read(cpu_patching_pte);
- text_poke_addr = __this_cpu_read(cpu_patching_addr);
+ patching_mm = __this_cpu_read(cpu_patching_context.mm);
+ text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
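+ /* Map and lock the PTE for the poke address for the duration of the patch */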
+ pte = get_locked_pte(patching_mm, text_poke_addr, &ptl);
+ if (!pte)
+ return -ENOMEM;
+
__set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
/* order PTE update before use, also serves as the hwsync */
/*
 * ptesync to order PTE update before TLB invalidation done
 * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
 */
local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);
+ pte_unmap_unlock(pte, ptl);
+
return err;
}
pte_t *pte;
unsigned long pfn = get_patch_pfn(addr);
- text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr & PAGE_MASK;
+ text_poke_addr = __this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
- pte = virt_to_kpte(text_poke_addr);
+ pte = __this_cpu_read(cpu_patching_context.pte);
__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
/* See ptesync comment in radix__set_pte_at() */
if (radix_enabled())
return err;
}
-static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
+int patch_instruction(u32 *addr, ppc_inst_t instr)
{
int err;
unsigned long flags;
/*
 * During early boot patch_instruction() is called
 * when text_poke_area is not ready, but we still need
 * to allow patching. We just do the plain old patching
 */
- if (!static_branch_likely(&poking_init_done))
+ if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) ||
+ !static_branch_likely(&poking_init_done))
return raw_patch_instruction(addr, instr);
local_irq_save(flags);
return err;
}
-#else /* !CONFIG_STRICT_KERNEL_RWX */
-
-static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
-{
- return raw_patch_instruction(addr, instr);
-}
-
-#endif /* CONFIG_STRICT_KERNEL_RWX */
-
-__ro_after_init DEFINE_STATIC_KEY_FALSE(init_mem_is_free);
-
-int patch_instruction(u32 *addr, ppc_inst_t instr)
-{
- /* Make sure we aren't patching a freed init section */
- if (static_branch_likely(&init_mem_is_free) && init_section_contains(addr, 4))
- return 0;
-
- return do_patch_instruction(addr, instr);
-}
NOKPROBE_SYMBOL(patch_instruction);
int patch_branch(u32 *addr, unsigned long target, int flags)