// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/cpuhotplug.h>
#include <linux/uaccess.h>
#include <linux/jump_label.h>

#include <asm/debug.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
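
/*
 * Write @instr through @patch_addr (a writable alias of @exec_addr) and then
 * flush the data cache and invalidate the icache so the new instruction is
 * visible to instruction fetch at @exec_addr.
 */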
static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr)
{
        if (!ppc_inst_prefixed(instr)) {
                u32 val = ppc_inst_val(instr);

                __put_kernel_nofault(patch_addr, &val, u32, failed);
        } else {
                u64 val = ppc_inst_as_ulong(instr);

                __put_kernel_nofault(patch_addr, &val, u64, failed);
        }

        asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr),
                                                            "r" (exec_addr));

        return 0;

failed:
        return -EPERM;
}

int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
{
        return __patch_instruction(addr, instr, addr);
}
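
/*
 * With CONFIG_STRICT_KERNEL_RWX the kernel text is mapped read-only, so
 * patching is done through a temporary writable alias of the target page.
 * Each CPU keeps either a reserved vmalloc area, or (on Radix with SMP) a
 * temporary mm, in cpu_patching_context below.
 */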
#ifdef CONFIG_STRICT_KERNEL_RWX

struct patch_context {
        union {
                struct vm_struct *area;
                struct mm_struct *mm;
        };
        unsigned long addr;
        pte_t *pte;
};

static DEFINE_PER_CPU(struct patch_context, cpu_patching_context);

static int map_patch_area(void *addr, unsigned long text_poke_addr);
static void unmap_patch_area(unsigned long addr);

static bool mm_patch_enabled(void)
{
        return IS_ENABLED(CONFIG_SMP) && radix_enabled();
}

/*
 * The following applies to the Radix MMU. The Hash MMU has different
 * requirements, and so is not supported.
 *
 * Changing mm requires context synchronising instructions on both sides of
 * the context switch, as well as a hwsync between the last instruction for
 * which the address of an associated storage access was translated using
 * the current mm context.
 *
 * switch_mm_irqs_off() performs an isync after the context switch. It is
 * the responsibility of the caller to perform the CSI and hwsync before
 * starting/stopping the temp mm.
 */
static struct mm_struct *start_using_temp_mm(struct mm_struct *temp_mm)
{
        struct mm_struct *orig_mm = current->active_mm;

        lockdep_assert_irqs_disabled();
        switch_mm_irqs_off(orig_mm, temp_mm, current);

        WARN_ON(!mm_is_thread_local(temp_mm));

        suspend_breakpoints();
        return orig_mm;
}

static void stop_using_temp_mm(struct mm_struct *temp_mm,
                               struct mm_struct *orig_mm)
{
        lockdep_assert_irqs_disabled();
        switch_mm_irqs_off(temp_mm, orig_mm, current);
        restore_breakpoints();
}
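
/*
 * Illustrative call sequence (a sketch of what __do_patch_instruction_mm()
 * below does):
 *
 *      asm volatile("ptesync" ::: "memory");   // hwsync before the switch
 *      isync();                                // CSI before the switch
 *      orig_mm = start_using_temp_mm(patching_mm);
 *      ... write the instruction through the temporary mapping ...
 *      stop_using_temp_mm(patching_mm, orig_mm);
 */
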
static int text_area_cpu_up(unsigned int cpu)
{
        struct vm_struct *area;
        unsigned long addr;
        int err;

        area = get_vm_area(PAGE_SIZE, VM_ALLOC);
        if (!area) {
                WARN_ONCE(1, "Failed to create text area for cpu %d\n", cpu);
                return -1;
        }

        // Map/unmap the area to ensure all page tables are pre-allocated
        addr = (unsigned long)area->addr;
        err = map_patch_area(empty_zero_page, addr);
        if (err)
                return err;

        unmap_patch_area(addr);

        this_cpu_write(cpu_patching_context.area, area);
        this_cpu_write(cpu_patching_context.addr, addr);
        this_cpu_write(cpu_patching_context.pte, virt_to_kpte(addr));

        return 0;
}

static int text_area_cpu_down(unsigned int cpu)
{
        free_vm_area(this_cpu_read(cpu_patching_context.area));
        this_cpu_write(cpu_patching_context.area, NULL);
        this_cpu_write(cpu_patching_context.addr, 0);
        this_cpu_write(cpu_patching_context.pte, NULL);
        return 0;
}

static void put_patching_mm(struct mm_struct *mm, unsigned long patching_addr)
{
        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm);
        free_pgd_range(&tlb, patching_addr, patching_addr + PAGE_SIZE, 0, 0);
        mmput(mm);
}

static int text_area_cpu_up_mm(unsigned int cpu)
{
        struct mm_struct *mm;
        unsigned long addr;
        pte_t *pte;
        spinlock_t *ptl;

        mm = mm_alloc();
        if (WARN_ON(!mm))
                goto fail_no_mm;

        /*
         * Choose a random page-aligned address from the interval
         * [PAGE_SIZE .. DEFAULT_MAP_WINDOW - PAGE_SIZE].
         * The lower address bound is PAGE_SIZE to avoid the zero-page.
         */
        addr = (1 + (get_random_long() % (DEFAULT_MAP_WINDOW / PAGE_SIZE - 2))) << PAGE_SHIFT;

        /*
         * PTE allocation uses GFP_KERNEL which means we need to
         * pre-allocate the PTE here because we cannot do the
         * allocation during patching when IRQs are disabled.
         *
         * Using get_locked_pte() to avoid open coding; the lock
         * is not needed here.
         */
        pte = get_locked_pte(mm, addr, &ptl);
        if (!pte)
                goto fail_no_pte;
        pte_unmap_unlock(pte, ptl);

        this_cpu_write(cpu_patching_context.mm, mm);
        this_cpu_write(cpu_patching_context.addr, addr);
        this_cpu_write(cpu_patching_context.pte, pte);

        return 0;

fail_no_pte:
        put_patching_mm(mm, addr);
fail_no_mm:
        return -ENOMEM;
}

static int text_area_cpu_down_mm(unsigned int cpu)
{
        put_patching_mm(this_cpu_read(cpu_patching_context.mm),
                        this_cpu_read(cpu_patching_context.addr));

        this_cpu_write(cpu_patching_context.mm, NULL);
        this_cpu_write(cpu_patching_context.addr, 0);
        this_cpu_write(cpu_patching_context.pte, NULL);

        return 0;
}
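
/*
 * poking_init() below registers the CPU hotplug callbacks above and then
 * flips poking_init_done; until that point patch_instruction() falls back
 * to raw_patch_instruction().
 */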
static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done);

void __init poking_init(void)
{
        int ret;

        if (mm_patch_enabled())
                ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                        "powerpc/text_poke_mm:online",
                                        text_area_cpu_up_mm,
                                        text_area_cpu_down_mm);
        else
                ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                        "powerpc/text_poke:online",
                                        text_area_cpu_up,
                                        text_area_cpu_down);

        /* cpuhp_setup_state() returns >= 0 on success */
        if (WARN_ON(ret < 0))
                return;

        static_branch_enable(&poking_init_done);
}

static unsigned long get_patch_pfn(void *addr)
{
        if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr))
                return vmalloc_to_pfn(addr);
        else
                return __pa_symbol(addr) >> PAGE_SHIFT;
}

/*
 * This can be called for kernel text or a module.
 */
static int map_patch_area(void *addr, unsigned long text_poke_addr)
{
        unsigned long pfn = get_patch_pfn(addr);

        return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
}
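
/*
 * Walk the kernel page tables for @addr, clear the PTE of the temporary
 * mapping and flush the TLB for that page.
 */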
static void unmap_patch_area(unsigned long addr)
{
        pte_t *ptep;
        pmd_t *pmdp;
        pud_t *pudp;
        p4d_t *p4dp;
        pgd_t *pgdp;

        pgdp = pgd_offset_k(addr);
        if (WARN_ON(pgd_none(*pgdp)))
                return;

        p4dp = p4d_offset(pgdp, addr);
        if (WARN_ON(p4d_none(*p4dp)))
                return;

        pudp = pud_offset(p4dp, addr);
        if (WARN_ON(pud_none(*pudp)))
                return;

        pmdp = pmd_offset(pudp, addr);
        if (WARN_ON(pmd_none(*pmdp)))
                return;

        ptep = pte_offset_kernel(pmdp, addr);
        if (WARN_ON(pte_none(*ptep)))
                return;

        /*
         * In hash, pte_clear flushes the TLB; in radix we have to do it
         * ourselves with flush_tlb_kernel_range() below.
         */
        pte_clear(&init_mm, addr, ptep);
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
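
/*
 * Patch using a temporary mm (Radix): map the target page at the per-cpu
 * random address in patching_mm, switch to that mm, write the instruction,
 * then tear the mapping down again. Runs with IRQs disabled.
 */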
static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
{
        int err;
        u32 *patch_addr;
        unsigned long text_poke_addr;
        pte_t *pte;
        unsigned long pfn = get_patch_pfn(addr);
        struct mm_struct *patching_mm;
        struct mm_struct *orig_mm;

        patching_mm = __this_cpu_read(cpu_patching_context.mm);
        pte = __this_cpu_read(cpu_patching_context.pte);
        text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
        patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

        __set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);

        /* order PTE update before use, also serves as the hwsync */
        asm volatile("ptesync": : :"memory");

        /* order context switch after arbitrary prior code */
        isync();

        orig_mm = start_using_temp_mm(patching_mm);

        err = __patch_instruction(addr, instr, patch_addr);

        /* hwsync performed by __patch_instruction (sync) if successful */
        if (err)
                mb();   /* failed executing, hwsync ordering */

        /* context synchronisation performed by __patch_instruction (isync or exception) */
        stop_using_temp_mm(patching_mm, orig_mm);

        pte_clear(patching_mm, text_poke_addr, pte);
        /*
         * ptesync to order PTE update before TLB invalidation done
         * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
         */
        local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);

        return err;
}
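
/*
 * Patch using the per-cpu text poke area: temporarily map the target page
 * into the reserved vmalloc area in init_mm and write through it.
 */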
static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
        int err;
        u32 *patch_addr;
        unsigned long text_poke_addr;
        pte_t *pte;
        unsigned long pfn = get_patch_pfn(addr);

        text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
        patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

        pte = __this_cpu_read(cpu_patching_context.pte);
        __set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
        /* See ptesync comment in radix__set_pte_at() */
        if (radix_enabled())
                asm volatile("ptesync": : :"memory");

        err = __patch_instruction(addr, instr, patch_addr);

        pte_clear(&init_mm, text_poke_addr, pte);
        flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);

        return err;
}

static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
        int err;
        unsigned long flags;

        /*
         * During very early boot patch_instruction() is called before the
         * text poke area is ready, but we still need to allow patching, so
         * fall back to plain raw patching.
         */
        if (!static_branch_likely(&poking_init_done))
                return raw_patch_instruction(addr, instr);

        local_irq_save(flags);
        if (mm_patch_enabled())
                err = __do_patch_instruction_mm(addr, instr);
        else
                err = __do_patch_instruction(addr, instr);
        local_irq_restore(flags);

        return err;
}

#else /* !CONFIG_STRICT_KERNEL_RWX */

static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
        return raw_patch_instruction(addr, instr);
}

#endif /* CONFIG_STRICT_KERNEL_RWX */

__ro_after_init DEFINE_STATIC_KEY_FALSE(init_mem_is_free);

int patch_instruction(u32 *addr, ppc_inst_t instr)
{
        /* Make sure we aren't patching a freed init section */
        if (static_branch_likely(&init_mem_is_free) && init_section_contains(addr, 4))
                return 0;

        return do_patch_instruction(addr, instr);
}
NOKPROBE_SYMBOL(patch_instruction);
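
/*
 * Build a branch from @addr to @target with the given @flags and patch it
 * in at @addr.
 */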
int patch_branch(u32 *addr, unsigned long target, int flags)
{
        ppc_inst_t instr;

        if (create_branch(&instr, addr, target, flags))
                return -ERANGE;

        return patch_instruction(addr, instr);
}

/*
 * Helper to check if a given instruction is a conditional branch.
 * Derived from the conditional checks in analyse_instr().
 */
bool is_conditional_branch(ppc_inst_t instr)
{
        unsigned int opcode = ppc_inst_primary_opcode(instr);

        if (opcode == 16)       /* bc, bca, bcl, bcla */
                return true;
        if (opcode == 19) {
                switch ((ppc_inst_val(instr) >> 1) & 0x3ff) {
                case 16:        /* bclr, bclrl */
                case 528:       /* bcctr, bcctrl */
                case 560:       /* bctar, bctarl */
                        return true;
                }
        }
        return false;
}
NOKPROBE_SYMBOL(is_conditional_branch);

int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
                       unsigned long target, int flags)
{
        long offset;

        offset = target;
        if (!(flags & BRANCH_ABSOLUTE))
                offset = offset - (unsigned long)addr;

        /* Check we can represent the target in the instruction format */
        if (!is_offset_in_cond_branch_range(offset))
                return 1;

        /* Mask out the flags and target, so they don't step on each other. */
        *instr = ppc_inst(0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC));

        return 0;
}
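
/*
 * For illustration, the B-form encoding produced above: a "beq" to an address
 * 0x100 bytes ahead is 0x40000000 | (12 << 21) | (2 << 16) | 0x100 =
 * 0x41820100, i.e. BO=12 (branch if CR bit set), BI=2 (CR0[EQ]), BD=0x100,
 * AA=LK=0. The BO/BI/AA/LK bits arrive via @flags, the displacement via
 * @target.
 */
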
int instr_is_relative_branch(ppc_inst_t instr)
{
        if (ppc_inst_val(instr) & BRANCH_ABSOLUTE)
                return 0;

        return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
}

int instr_is_relative_link_branch(ppc_inst_t instr)
{
        return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK);
}

static unsigned long branch_iform_target(const u32 *instr)
{
        signed long imm;

        imm = ppc_inst_val(ppc_inst_read(instr)) & 0x3FFFFFC;

        /* If the top bit of the immediate value is set this is negative */
        if (imm & 0x2000000)
                imm -= 0x4000000;

        if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
                imm += (unsigned long)instr;

        return (unsigned long)imm;
}

static unsigned long branch_bform_target(const u32 *instr)
{
        signed long imm;

        imm = ppc_inst_val(ppc_inst_read(instr)) & 0xFFFC;

        /* If the top bit of the immediate value is set this is negative */
        if (imm & 0x8000)
                imm -= 0x10000;

        if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
                imm += (unsigned long)instr;

        return (unsigned long)imm;
}

unsigned long branch_target(const u32 *instr)
{
        if (instr_is_branch_iform(ppc_inst_read(instr)))
                return branch_iform_target(instr);
        else if (instr_is_branch_bform(ppc_inst_read(instr)))
                return branch_bform_target(instr);

        return 0;
}
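
/*
 * Regenerate the branch found at @src so that, when placed at @dest, it
 * still reaches the same target address.
 */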
int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src)
{
        unsigned long target;

        target = branch_target(src);

        if (instr_is_branch_iform(ppc_inst_read(src)))
                return create_branch(instr, dest, target,
                                     ppc_inst_val(ppc_inst_read(src)));
        else if (instr_is_branch_bform(ppc_inst_read(src)))
                return create_cond_branch(instr, dest, target,
                                          ppc_inst_val(ppc_inst_read(src)));

        return 1;
}