/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Split from ftrace_64.S
 */

#include <linux/export.h>
#include <linux/magic.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
#include <asm/ppc-opcode.h>
#include <asm/thread_info.h>
#include <asm/bug.h>
#include <asm/ptrace.h>

/*
 * ftrace_caller()/ftrace_regs_caller() is the function that replaces _mcount()
 * when ftrace is active.
 *
 * We arrive here after a function A calls function B, and we are the trace
 * function for B. When we enter, r1 points to A's stack frame and B has not
 * yet had a chance to allocate one.
 *
 * Additionally r2 may point either to the TOC for A, or B, depending on
 * whether B did a TOC setup sequence before calling us.
 *
 * On entry the LR points back to the _mcount() call site, and r0 holds the
 * saved LR as it was on entry to B, ie. the original return address at the
 * call site in A.
 *
 * Our job is to save the register state into a struct pt_regs (on the stack)
 * and then arrange for the ftrace function to be called.
 */
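
/*
 * The entry/exit code below is shared via the \allregs macro argument:
 * ftrace_regs_caller passes 1 and captures a full pt_regs, while
 * ftrace_caller passes 0 and saves only the registers needed for a plain
 * function trace, marking the difference by storing a zeroed MSR.
 */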

.macro	ftrace_regs_entry allregs
	/* Create a minimal stack frame for representing B */
	PPC_STLU	r1, -STACK_FRAME_MIN_SIZE(r1)

	/* Create our stack frame + pt_regs */
	PPC_STLU	r1, -SWITCH_FRAME_SIZE(r1)

	/* Save all gprs to pt_regs */
	SAVE_GPR(0, r1)
	SAVE_GPRS(3, 10, r1)

#ifdef CONFIG_PPC64
	/* Save the original return address in A's stack frame */
	std	r0, LRSAVE+SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE(r1)
	/* Ok to continue? */
	lbz	r3, PACA_FTRACE_ENABLED(r13)
	cmpdi	r3, 0
	beq	ftrace_no_trace
#endif

	.if \allregs == 1
	SAVE_GPR(2, r1)
	SAVE_GPRS(11, 31, r1)
	.else
#ifdef CONFIG_LIVEPATCH_64
	SAVE_GPR(14, r1)
#endif
	.endif

	/* Save previous stack pointer (r1) */
	addi	r8, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
	PPC_STL	r8, GPR1(r1)

	.if \allregs == 1
	/* Load special regs for save below */
	mfmsr	r8
	mfctr	r9
	mfxer	r10
	mfcr	r11
	.else
	/* Clear MSR to flag as ftrace_caller versus ftrace_regs_caller */
	li	r8, 0
	.endif

	/* Get the _mcount() call site out of LR */
	mflr	r7
	/* Save it as pt_regs->nip */
	PPC_STL	r7, _NIP(r1)
	/* Also save it in B's stackframe header for proper unwind */
	PPC_STL	r7, LRSAVE+SWITCH_FRAME_SIZE(r1)
	/* Save the original return address (r0) in pt_regs->link */
	PPC_STL	r0, _LINK(r1)

#ifdef CONFIG_PPC64
	/* Save callee's TOC in the ABI compliant location */
	std	r2, STK_GOT(r1)
	LOAD_PACA_TOC()		/* get kernel TOC in r2 */
	LOAD_REG_ADDR(r3, function_trace_op)
	ld	r5, 0(r3)
#else
	lis	r3, function_trace_op@ha
	lwz	r5, function_trace_op@l(r3)
#endif
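
	/*
	 * r5 now holds the current function_trace_op (the ftrace_ops being
	 * invoked). Saving the callee's TOC and switching to the kernel TOC
	 * above lets us reach kernel globals regardless of whether B had set
	 * up r2 before calling us.
	 */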

#ifdef CONFIG_LIVEPATCH_64
	mr	r14, r7		/* remember old NIP */
#endif

	/* Calculate ip from nip-4 into r3 for call below */
	subi	r3, r7, MCOUNT_INSN_SIZE
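
	/*
	 * The saved NIP is the return address of the profiling call, ie. the
	 * instruction after the call site; backing up by one instruction
	 * (MCOUNT_INSN_SIZE) gives the address ftrace uses as the traced ip.
	 */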

	/* Put the original return address in r4 as parent_ip */
	mr	r4, r0

	/* Save special regs */
	PPC_STL	r8, _MSR(r1)
	.if \allregs == 1
	PPC_STL	r9, _CTR(r1)
	PPC_STL	r10, _XER(r1)
	PPC_STL	r11, _CCR(r1)
	.endif

	/* Load &pt_regs in r6 for call below */
	addi	r6, r1, STACK_INT_FRAME_REGS
.endm
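
/*
 * At the end of ftrace_regs_entry the tracer arguments are in place:
 * r3 = traced ip, r4 = parent ip, r5 = ftrace_ops, r6 = &pt_regs, matching
 * the ftrace_call(r3, r4, r5, r6) sites in the callers below.
 */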

.macro	ftrace_regs_exit allregs
	/* Load ctr with the possibly modified NIP */
	PPC_LL	r3, _NIP(r1)
	mtctr	r3

#ifdef CONFIG_LIVEPATCH_64
	cmpd	r14, r3		/* has NIP been altered? */
#endif

	/* Restore gprs */
	.if \allregs == 1
	REST_GPRS(2, 31, r1)
	.else
	REST_GPRS(3, 10, r1)
#ifdef CONFIG_LIVEPATCH_64
	REST_GPR(14, r1)
#endif
	.endif

	/* Restore possibly modified LR */
	PPC_LL	r0, _LINK(r1)
	mtlr	r0

#ifdef CONFIG_PPC64
	/* Restore callee's TOC */
	ld	r2, STK_GOT(r1)
#endif

	/* Pop our stack frame */
	addi	r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE

#ifdef CONFIG_LIVEPATCH_64
	/* Based on the cmpd above, if the NIP was altered handle livepatch */
	bne-	livepatch_handler
#endif
	bctr			/* jump after _mcount site */
.endm
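
/*
 * The "bl ftrace_stub" at ftrace_regs_call/ftrace_call below is the
 * instruction ftrace live-patches to point at the active tracer; with no
 * tracer attached it simply calls the empty stub.
 */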

_GLOBAL(ftrace_regs_caller)
	ftrace_regs_entry 1
	/* ftrace_call(r3, r4, r5, r6) */
.globl ftrace_regs_call
ftrace_regs_call:
	bl	ftrace_stub
	nop
	ftrace_regs_exit 1

_GLOBAL(ftrace_caller)
	ftrace_regs_entry 0
	/* ftrace_call(r3, r4, r5, r6) */
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
	ftrace_regs_exit 0

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_PPC64
ftrace_no_trace:
	mflr	r3
	mtctr	r3
	REST_GPR(3, r1)
	addi	r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
	mtlr	r0
	bctr
#endif

#ifdef CONFIG_LIVEPATCH_64
	/*
	 * This function runs in the mcount context, between two functions. As
	 * such it can only clobber registers which are volatile and used in
	 * function tracing.
	 *
	 * We get here when a function A calls another function B, but B has
	 * been live patched with a new function C.
	 *
	 * On entry:
	 *  - we have no stack frame and can not allocate one
	 *  - LR points back to the original caller (in A)
	 *  - CTR holds the new NIP in C
	 *  - r0, r11 & r12 are free
	 */
livepatch_handler:
	ld	r12, PACA_THREAD_INFO(r13)

	/* Allocate 3 x 8 bytes */
	ld	r11, TI_livepatch_sp(r12)
	addi	r11, r11, 24
	std	r11, TI_livepatch_sp(r12)
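
	/*
	 * The three new slots hold, from the new top of the livepatch stack
	 * downwards: the stack end marker (-8), the real LR (-16) and the
	 * caller's TOC (-24).
	 */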

	/* Save toc & real LR on livepatch stack */
	std	r2,  -24(r11)
	mflr	r12
	std	r12, -16(r11)

	/* Store stack end marker */
	lis	r12, STACK_END_MAGIC@h
	ori	r12, r12, STACK_END_MAGIC@l
	std	r12, -8(r11)

	/* Put ctr in r12 for global entry and branch there */
	mfctr	r12
	bctrl
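
	/*
	 * bctrl enters the replacement function C through its global entry
	 * point and sets LR to the next instruction, so C returns straight
	 * back into this handler rather than to A.
	 */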

	/*
	 * Now we are returning from the patched function to the original
	 * caller A. We are free to use r11, r12 and we can use r2 until we
	 * restore it.
	 */

	ld	r12, PACA_THREAD_INFO(r13)

	ld	r11, TI_livepatch_sp(r12)

	/* Check stack marker hasn't been trashed */
	lis	r2,  STACK_END_MAGIC@h
	ori	r2,  r2, STACK_END_MAGIC@l
	ld	r12, -8(r11)
1:	tdne	r12, r2
	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0

	/* Restore LR & toc from livepatch stack */
	ld	r12, -16(r11)
	mtlr	r12
	ld	r2,  -24(r11)

	/* Pop livepatch stack frame */
	ld	r12, PACA_THREAD_INFO(r13)
	subi	r11, r11, 24
	std	r11, TI_livepatch_sp(r12)

	/* Return to original caller of live patched function */
	blr
#endif /* CONFIG_LIVEPATCH_64 */

#ifndef CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY
_GLOBAL(mcount)
_GLOBAL(_mcount)
EXPORT_SYMBOL(_mcount)
	mflr	r12
	mtctr	r12
	mtlr	r0
	bctr
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
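/*
 * When the graph tracer is active, the return address of a traced function
 * is replaced with return_to_handler. ftrace_return_to_handler() hands back
 * the real return address, which we then jump to after restoring the saved
 * return values.
 */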
_GLOBAL(return_to_handler)
	/* need to save return values */

	/*
	 * We might be called from a module.
	 * Switch to our TOC to run inside the core kernel.
	 */
	LOAD_PACA_TOC()

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
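
/*
 * Space reserved for ftrace trampolines in both regular and init text, so a
 * patched call site can always reach a trampoline (and from there the ftrace
 * entry code) with a single relative branch.
 */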

.pushsection ".tramp.ftrace.text","aw",@progbits;
.globl ftrace_tramp_text
ftrace_tramp_text:
.popsection

.pushsection ".tramp.ftrace.init","aw",@progbits;
.globl ftrace_tramp_init
ftrace_tramp_init:
.popsection