about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorNaveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>2017-04-25 09:55:54 -0400
committerMichael Ellerman <mpe@ellerman.id.au>2017-04-27 08:20:29 -0400
commit096ff2ddba83bf022d593a3096d683e57c4befb0 (patch)
treed1db8a2bbb1647fc6bf37a5aef235e9645e5dc97
parent7853f9c029ac9134df42ea9e0d6bc600180f268d (diff)
powerpc/ftrace/64: Split further based on -mprofile-kernel
Split ftrace_64.S further, retaining the core ftrace 64-bit aspects in ftrace_64.S and moving ftrace_caller() and ftrace_graph_caller() into separate files based on -mprofile-kernel. The livepatch routines are all now contained within the mprofile file. Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--arch/powerpc/kernel/trace/Makefile5
-rw-r--r--arch/powerpc/kernel/trace/ftrace_64.S306
-rw-r--r--arch/powerpc/kernel/trace/ftrace_64_mprofile.S272
-rw-r--r--arch/powerpc/kernel/trace/ftrace_64_pg.S68
4 files changed, 346 insertions(+), 305 deletions(-)
diff --git a/arch/powerpc/kernel/trace/Makefile b/arch/powerpc/kernel/trace/Makefile
index 5f5a35254a9b..729dffc5f7bc 100644
--- a/arch/powerpc/kernel/trace/Makefile
+++ b/arch/powerpc/kernel/trace/Makefile
@@ -11,6 +11,11 @@ endif
11 11
12obj32-$(CONFIG_FUNCTION_TRACER) += ftrace_32.o 12obj32-$(CONFIG_FUNCTION_TRACER) += ftrace_32.o
13obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_64.o 13obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_64.o
14ifdef CONFIG_MPROFILE_KERNEL
15obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_64_mprofile.o
16else
17obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_64_pg.o
18endif
14obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 19obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
15obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 20obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
16obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o 21obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
diff --git a/arch/powerpc/kernel/trace/ftrace_64.S b/arch/powerpc/kernel/trace/ftrace_64.S
index 587e7b5c0aff..e5ccea19821e 100644
--- a/arch/powerpc/kernel/trace/ftrace_64.S
+++ b/arch/powerpc/kernel/trace/ftrace_64.S
@@ -23,233 +23,7 @@ EXPORT_SYMBOL(_mcount)
23 mtlr r0 23 mtlr r0
24 bctr 24 bctr
25 25
26#ifndef CC_USING_MPROFILE_KERNEL 26#else /* CONFIG_DYNAMIC_FTRACE */
27_GLOBAL_TOC(ftrace_caller)
28 /* Taken from output of objdump from lib64/glibc */
29 mflr r3
30 ld r11, 0(r1)
31 stdu r1, -112(r1)
32 std r3, 128(r1)
33 ld r4, 16(r11)
34 subi r3, r3, MCOUNT_INSN_SIZE
35.globl ftrace_call
36ftrace_call:
37 bl ftrace_stub
38 nop
39#ifdef CONFIG_FUNCTION_GRAPH_TRACER
40.globl ftrace_graph_call
41ftrace_graph_call:
42 b ftrace_graph_stub
43_GLOBAL(ftrace_graph_stub)
44#endif
45 ld r0, 128(r1)
46 mtlr r0
47 addi r1, r1, 112
48
49#else /* CC_USING_MPROFILE_KERNEL */
50/*
51 *
52 * ftrace_caller() is the function that replaces _mcount() when ftrace is
53 * active.
54 *
55 * We arrive here after a function A calls function B, and we are the trace
56 * function for B. When we enter r1 points to A's stack frame, B has not yet
57 * had a chance to allocate one yet.
58 *
59 * Additionally r2 may point either to the TOC for A, or B, depending on
60 * whether B did a TOC setup sequence before calling us.
61 *
62 * On entry the LR points back to the _mcount() call site, and r0 holds the
63 * saved LR as it was on entry to B, ie. the original return address at the
64 * call site in A.
65 *
66 * Our job is to save the register state into a struct pt_regs (on the stack)
67 * and then arrange for the ftrace function to be called.
68 */
69_GLOBAL(ftrace_caller)
70 /* Save the original return address in A's stack frame */
71 std r0,LRSAVE(r1)
72
73 /* Create our stack frame + pt_regs */
74 stdu r1,-SWITCH_FRAME_SIZE(r1)
75
76 /* Save all gprs to pt_regs */
77 SAVE_8GPRS(0,r1)
78 SAVE_8GPRS(8,r1)
79 SAVE_8GPRS(16,r1)
80 SAVE_8GPRS(24,r1)
81
82 /* Load special regs for save below */
83 mfmsr r8
84 mfctr r9
85 mfxer r10
86 mfcr r11
87
88 /* Get the _mcount() call site out of LR */
89 mflr r7
90 /* Save it as pt_regs->nip */
91 std r7, _NIP(r1)
92 /* Save the read LR in pt_regs->link */
93 std r0, _LINK(r1)
94
95 /* Save callee's TOC in the ABI compliant location */
96 std r2, 24(r1)
97 ld r2,PACATOC(r13) /* get kernel TOC in r2 */
98
99 addis r3,r2,function_trace_op@toc@ha
100 addi r3,r3,function_trace_op@toc@l
101 ld r5,0(r3)
102
103#ifdef CONFIG_LIVEPATCH
104 mr r14,r7 /* remember old NIP */
105#endif
106 /* Calculate ip from nip-4 into r3 for call below */
107 subi r3, r7, MCOUNT_INSN_SIZE
108
109 /* Put the original return address in r4 as parent_ip */
110 mr r4, r0
111
112 /* Save special regs */
113 std r8, _MSR(r1)
114 std r9, _CTR(r1)
115 std r10, _XER(r1)
116 std r11, _CCR(r1)
117
118 /* Load &pt_regs in r6 for call below */
119 addi r6, r1 ,STACK_FRAME_OVERHEAD
120
121 /* ftrace_call(r3, r4, r5, r6) */
122.globl ftrace_call
123ftrace_call:
124 bl ftrace_stub
125 nop
126
127 /* Load ctr with the possibly modified NIP */
128 ld r3, _NIP(r1)
129 mtctr r3
130#ifdef CONFIG_LIVEPATCH
131 cmpd r14,r3 /* has NIP been altered? */
132#endif
133
134 /* Restore gprs */
135 REST_8GPRS(0,r1)
136 REST_8GPRS(8,r1)
137 REST_8GPRS(16,r1)
138 REST_8GPRS(24,r1)
139
140 /* Restore possibly modified LR */
141 ld r0, _LINK(r1)
142 mtlr r0
143
144 /* Restore callee's TOC */
145 ld r2, 24(r1)
146
147 /* Pop our stack frame */
148 addi r1, r1, SWITCH_FRAME_SIZE
149
150#ifdef CONFIG_LIVEPATCH
151 /* Based on the cmpd above, if the NIP was altered handle livepatch */
152 bne- livepatch_handler
153#endif
154
155#ifdef CONFIG_FUNCTION_GRAPH_TRACER
156.globl ftrace_graph_call
157ftrace_graph_call:
158 b ftrace_graph_stub
159_GLOBAL(ftrace_graph_stub)
160#endif
161
162 bctr /* jump after _mcount site */
163#endif /* CC_USING_MPROFILE_KERNEL */
164
165_GLOBAL(ftrace_stub)
166 blr
167
168#ifdef CONFIG_LIVEPATCH
169 /*
170 * This function runs in the mcount context, between two functions. As
171 * such it can only clobber registers which are volatile and used in
172 * function linkage.
173 *
174 * We get here when a function A, calls another function B, but B has
175 * been live patched with a new function C.
176 *
177 * On entry:
178 * - we have no stack frame and can not allocate one
179 * - LR points back to the original caller (in A)
180 * - CTR holds the new NIP in C
181 * - r0 & r12 are free
182 *
183 * r0 can't be used as the base register for a DS-form load or store, so
184 * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
185 */
186livepatch_handler:
187 CURRENT_THREAD_INFO(r12, r1)
188
189 /* Save stack pointer into r0 */
190 mr r0, r1
191
192 /* Allocate 3 x 8 bytes */
193 ld r1, TI_livepatch_sp(r12)
194 addi r1, r1, 24
195 std r1, TI_livepatch_sp(r12)
196
197 /* Save toc & real LR on livepatch stack */
198 std r2, -24(r1)
199 mflr r12
200 std r12, -16(r1)
201
202 /* Store stack end marker */
203 lis r12, STACK_END_MAGIC@h
204 ori r12, r12, STACK_END_MAGIC@l
205 std r12, -8(r1)
206
207 /* Restore real stack pointer */
208 mr r1, r0
209
210 /* Put ctr in r12 for global entry and branch there */
211 mfctr r12
212 bctrl
213
214 /*
215 * Now we are returning from the patched function to the original
216 * caller A. We are free to use r0 and r12, and we can use r2 until we
217 * restore it.
218 */
219
220 CURRENT_THREAD_INFO(r12, r1)
221
222 /* Save stack pointer into r0 */
223 mr r0, r1
224
225 ld r1, TI_livepatch_sp(r12)
226
227 /* Check stack marker hasn't been trashed */
228 lis r2, STACK_END_MAGIC@h
229 ori r2, r2, STACK_END_MAGIC@l
230 ld r12, -8(r1)
2311: tdne r12, r2
232 EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
233
234 /* Restore LR & toc from livepatch stack */
235 ld r12, -16(r1)
236 mtlr r12
237 ld r2, -24(r1)
238
239 /* Pop livepatch stack frame */
240 CURRENT_THREAD_INFO(r12, r0)
241 subi r1, r1, 24
242 std r1, TI_livepatch_sp(r12)
243
244 /* Restore real stack pointer */
245 mr r1, r0
246
247 /* Return to original caller of live patched function */
248 blr
249#endif
250
251
252#else
253_GLOBAL_TOC(_mcount) 27_GLOBAL_TOC(_mcount)
254EXPORT_SYMBOL(_mcount) 28EXPORT_SYMBOL(_mcount)
255 /* Taken from output of objdump from lib64/glibc */ 29 /* Taken from output of objdump from lib64/glibc */
@@ -267,7 +41,6 @@ EXPORT_SYMBOL(_mcount)
267 bctrl 41 bctrl
268 nop 42 nop
269 43
270
271#ifdef CONFIG_FUNCTION_GRAPH_TRACER 44#ifdef CONFIG_FUNCTION_GRAPH_TRACER
272 b ftrace_graph_caller 45 b ftrace_graph_caller
273#endif 46#endif
@@ -276,86 +49,9 @@ EXPORT_SYMBOL(_mcount)
276 addi r1, r1, 112 49 addi r1, r1, 112
277_GLOBAL(ftrace_stub) 50_GLOBAL(ftrace_stub)
278 blr 51 blr
279
280#endif /* CONFIG_DYNAMIC_FTRACE */ 52#endif /* CONFIG_DYNAMIC_FTRACE */
281 53
282#ifdef CONFIG_FUNCTION_GRAPH_TRACER 54#ifdef CONFIG_FUNCTION_GRAPH_TRACER
283#ifndef CC_USING_MPROFILE_KERNEL
284_GLOBAL(ftrace_graph_caller)
285 /* load r4 with local address */
286 ld r4, 128(r1)
287 subi r4, r4, MCOUNT_INSN_SIZE
288
289 /* Grab the LR out of the caller stack frame */
290 ld r11, 112(r1)
291 ld r3, 16(r11)
292
293 bl prepare_ftrace_return
294 nop
295
296 /*
297 * prepare_ftrace_return gives us the address we divert to.
298 * Change the LR in the callers stack frame to this.
299 */
300 ld r11, 112(r1)
301 std r3, 16(r11)
302
303 ld r0, 128(r1)
304 mtlr r0
305 addi r1, r1, 112
306 blr
307
308#else /* CC_USING_MPROFILE_KERNEL */
309_GLOBAL(ftrace_graph_caller)
310 stdu r1, -112(r1)
311 /* with -mprofile-kernel, parameter regs are still alive at _mcount */
312 std r10, 104(r1)
313 std r9, 96(r1)
314 std r8, 88(r1)
315 std r7, 80(r1)
316 std r6, 72(r1)
317 std r5, 64(r1)
318 std r4, 56(r1)
319 std r3, 48(r1)
320
321 /* Save callee's TOC in the ABI compliant location */
322 std r2, 24(r1)
323 ld r2, PACATOC(r13) /* get kernel TOC in r2 */
324
325 mfctr r4 /* ftrace_caller has moved local addr here */
326 std r4, 40(r1)
327 mflr r3 /* ftrace_caller has restored LR from stack */
328 subi r4, r4, MCOUNT_INSN_SIZE
329
330 bl prepare_ftrace_return
331 nop
332
333 /*
334 * prepare_ftrace_return gives us the address we divert to.
335 * Change the LR to this.
336 */
337 mtlr r3
338
339 ld r0, 40(r1)
340 mtctr r0
341 ld r10, 104(r1)
342 ld r9, 96(r1)
343 ld r8, 88(r1)
344 ld r7, 80(r1)
345 ld r6, 72(r1)
346 ld r5, 64(r1)
347 ld r4, 56(r1)
348 ld r3, 48(r1)
349
350 /* Restore callee's TOC */
351 ld r2, 24(r1)
352
353 addi r1, r1, 112
354 mflr r0
355 std r0, LRSAVE(r1)
356 bctr
357#endif /* CC_USING_MPROFILE_KERNEL */
358
359_GLOBAL(return_to_handler) 55_GLOBAL(return_to_handler)
360 /* need to save return values */ 56 /* need to save return values */
361 std r4, -32(r1) 57 std r4, -32(r1)
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
new file mode 100644
index 000000000000..7c933a99f5d5
--- /dev/null
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -0,0 +1,272 @@
1/*
2 * Split from ftrace_64.S
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/magic.h>
11#include <asm/ppc_asm.h>
12#include <asm/asm-offsets.h>
13#include <asm/ftrace.h>
14#include <asm/ppc-opcode.h>
15#include <asm/export.h>
16#include <asm/thread_info.h>
17#include <asm/bug.h>
18#include <asm/ptrace.h>
19
20#ifdef CONFIG_DYNAMIC_FTRACE
21/*
22 *
23 * ftrace_caller() is the function that replaces _mcount() when ftrace is
24 * active.
25 *
26 * We arrive here after a function A calls function B, and we are the trace
27 * function for B. When we enter r1 points to A's stack frame, B has not yet
28 * had a chance to allocate one yet.
29 *
30 * Additionally r2 may point either to the TOC for A, or B, depending on
31 * whether B did a TOC setup sequence before calling us.
32 *
33 * On entry the LR points back to the _mcount() call site, and r0 holds the
34 * saved LR as it was on entry to B, ie. the original return address at the
35 * call site in A.
36 *
37 * Our job is to save the register state into a struct pt_regs (on the stack)
38 * and then arrange for the ftrace function to be called.
39 */
40_GLOBAL(ftrace_caller)
41 /* Save the original return address in A's stack frame */
42 std r0,LRSAVE(r1)
43
44 /* Create our stack frame + pt_regs */
45 stdu r1,-SWITCH_FRAME_SIZE(r1)
46
47 /* Save all gprs to pt_regs */
48 SAVE_8GPRS(0,r1)
49 SAVE_8GPRS(8,r1)
50 SAVE_8GPRS(16,r1)
51 SAVE_8GPRS(24,r1)
52
53 /* Load special regs for save below */
54 mfmsr r8
55 mfctr r9
56 mfxer r10
57 mfcr r11
58
59 /* Get the _mcount() call site out of LR */
60 mflr r7
61 /* Save it as pt_regs->nip */
62 std r7, _NIP(r1)
63 /* Save the read LR in pt_regs->link */
64 std r0, _LINK(r1)
65
66 /* Save callee's TOC in the ABI compliant location */
67 std r2, 24(r1)
68 ld r2,PACATOC(r13) /* get kernel TOC in r2 */
69
70 addis r3,r2,function_trace_op@toc@ha
71 addi r3,r3,function_trace_op@toc@l
72 ld r5,0(r3)
73
74#ifdef CONFIG_LIVEPATCH
75 mr r14,r7 /* remember old NIP */
76#endif
77 /* Calculate ip from nip-4 into r3 for call below */
78 subi r3, r7, MCOUNT_INSN_SIZE
79
80 /* Put the original return address in r4 as parent_ip */
81 mr r4, r0
82
83 /* Save special regs */
84 std r8, _MSR(r1)
85 std r9, _CTR(r1)
86 std r10, _XER(r1)
87 std r11, _CCR(r1)
88
89 /* Load &pt_regs in r6 for call below */
90 addi r6, r1 ,STACK_FRAME_OVERHEAD
91
92 /* ftrace_call(r3, r4, r5, r6) */
93.globl ftrace_call
94ftrace_call:
95 bl ftrace_stub
96 nop
97
98 /* Load ctr with the possibly modified NIP */
99 ld r3, _NIP(r1)
100 mtctr r3
101#ifdef CONFIG_LIVEPATCH
102 cmpd r14,r3 /* has NIP been altered? */
103#endif
104
105 /* Restore gprs */
106 REST_8GPRS(0,r1)
107 REST_8GPRS(8,r1)
108 REST_8GPRS(16,r1)
109 REST_8GPRS(24,r1)
110
111 /* Restore possibly modified LR */
112 ld r0, _LINK(r1)
113 mtlr r0
114
115 /* Restore callee's TOC */
116 ld r2, 24(r1)
117
118 /* Pop our stack frame */
119 addi r1, r1, SWITCH_FRAME_SIZE
120
121#ifdef CONFIG_LIVEPATCH
122 /* Based on the cmpd above, if the NIP was altered handle livepatch */
123 bne- livepatch_handler
124#endif
125
126#ifdef CONFIG_FUNCTION_GRAPH_TRACER
127.globl ftrace_graph_call
128ftrace_graph_call:
129 b ftrace_graph_stub
130_GLOBAL(ftrace_graph_stub)
131#endif
132
133 bctr /* jump after _mcount site */
134
135_GLOBAL(ftrace_stub)
136 blr
137
138#ifdef CONFIG_LIVEPATCH
139 /*
140 * This function runs in the mcount context, between two functions. As
141 * such it can only clobber registers which are volatile and used in
142 * function linkage.
143 *
144 * We get here when a function A, calls another function B, but B has
145 * been live patched with a new function C.
146 *
147 * On entry:
148 * - we have no stack frame and can not allocate one
149 * - LR points back to the original caller (in A)
150 * - CTR holds the new NIP in C
151 * - r0 & r12 are free
152 *
153 * r0 can't be used as the base register for a DS-form load or store, so
154 * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
155 */
156livepatch_handler:
157 CURRENT_THREAD_INFO(r12, r1)
158
159 /* Save stack pointer into r0 */
160 mr r0, r1
161
162 /* Allocate 3 x 8 bytes */
163 ld r1, TI_livepatch_sp(r12)
164 addi r1, r1, 24
165 std r1, TI_livepatch_sp(r12)
166
167 /* Save toc & real LR on livepatch stack */
168 std r2, -24(r1)
169 mflr r12
170 std r12, -16(r1)
171
172 /* Store stack end marker */
173 lis r12, STACK_END_MAGIC@h
174 ori r12, r12, STACK_END_MAGIC@l
175 std r12, -8(r1)
176
177 /* Restore real stack pointer */
178 mr r1, r0
179
180 /* Put ctr in r12 for global entry and branch there */
181 mfctr r12
182 bctrl
183
184 /*
185 * Now we are returning from the patched function to the original
186 * caller A. We are free to use r0 and r12, and we can use r2 until we
187 * restore it.
188 */
189
190 CURRENT_THREAD_INFO(r12, r1)
191
192 /* Save stack pointer into r0 */
193 mr r0, r1
194
195 ld r1, TI_livepatch_sp(r12)
196
197 /* Check stack marker hasn't been trashed */
198 lis r2, STACK_END_MAGIC@h
199 ori r2, r2, STACK_END_MAGIC@l
200 ld r12, -8(r1)
2011: tdne r12, r2
202 EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
203
204 /* Restore LR & toc from livepatch stack */
205 ld r12, -16(r1)
206 mtlr r12
207 ld r2, -24(r1)
208
209 /* Pop livepatch stack frame */
210 CURRENT_THREAD_INFO(r12, r0)
211 subi r1, r1, 24
212 std r1, TI_livepatch_sp(r12)
213
214 /* Restore real stack pointer */
215 mr r1, r0
216
217 /* Return to original caller of live patched function */
218 blr
219#endif /* CONFIG_LIVEPATCH */
220
221#endif /* CONFIG_DYNAMIC_FTRACE */
222
223#ifdef CONFIG_FUNCTION_GRAPH_TRACER
224_GLOBAL(ftrace_graph_caller)
225 stdu r1, -112(r1)
226 /* with -mprofile-kernel, parameter regs are still alive at _mcount */
227 std r10, 104(r1)
228 std r9, 96(r1)
229 std r8, 88(r1)
230 std r7, 80(r1)
231 std r6, 72(r1)
232 std r5, 64(r1)
233 std r4, 56(r1)
234 std r3, 48(r1)
235
236 /* Save callee's TOC in the ABI compliant location */
237 std r2, 24(r1)
238 ld r2, PACATOC(r13) /* get kernel TOC in r2 */
239
240 mfctr r4 /* ftrace_caller has moved local addr here */
241 std r4, 40(r1)
242 mflr r3 /* ftrace_caller has restored LR from stack */
243 subi r4, r4, MCOUNT_INSN_SIZE
244
245 bl prepare_ftrace_return
246 nop
247
248 /*
249 * prepare_ftrace_return gives us the address we divert to.
250 * Change the LR to this.
251 */
252 mtlr r3
253
254 ld r0, 40(r1)
255 mtctr r0
256 ld r10, 104(r1)
257 ld r9, 96(r1)
258 ld r8, 88(r1)
259 ld r7, 80(r1)
260 ld r6, 72(r1)
261 ld r5, 64(r1)
262 ld r4, 56(r1)
263 ld r3, 48(r1)
264
265 /* Restore callee's TOC */
266 ld r2, 24(r1)
267
268 addi r1, r1, 112
269 mflr r0
270 std r0, LRSAVE(r1)
271 bctr
272#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/powerpc/kernel/trace/ftrace_64_pg.S b/arch/powerpc/kernel/trace/ftrace_64_pg.S
new file mode 100644
index 000000000000..f095358da96e
--- /dev/null
+++ b/arch/powerpc/kernel/trace/ftrace_64_pg.S
@@ -0,0 +1,68 @@
1/*
2 * Split from ftrace_64.S
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/magic.h>
11#include <asm/ppc_asm.h>
12#include <asm/asm-offsets.h>
13#include <asm/ftrace.h>
14#include <asm/ppc-opcode.h>
15#include <asm/export.h>
16
17#ifdef CONFIG_DYNAMIC_FTRACE
18_GLOBAL_TOC(ftrace_caller)
19 /* Taken from output of objdump from lib64/glibc */
20 mflr r3
21 ld r11, 0(r1)
22 stdu r1, -112(r1)
23 std r3, 128(r1)
24 ld r4, 16(r11)
25 subi r3, r3, MCOUNT_INSN_SIZE
26.globl ftrace_call
27ftrace_call:
28 bl ftrace_stub
29 nop
30#ifdef CONFIG_FUNCTION_GRAPH_TRACER
31.globl ftrace_graph_call
32ftrace_graph_call:
33 b ftrace_graph_stub
34_GLOBAL(ftrace_graph_stub)
35#endif
36 ld r0, 128(r1)
37 mtlr r0
38 addi r1, r1, 112
39
40_GLOBAL(ftrace_stub)
41 blr
42#endif /* CONFIG_DYNAMIC_FTRACE */
43
44#ifdef CONFIG_FUNCTION_GRAPH_TRACER
45_GLOBAL(ftrace_graph_caller)
46 /* load r4 with local address */
47 ld r4, 128(r1)
48 subi r4, r4, MCOUNT_INSN_SIZE
49
50 /* Grab the LR out of the caller stack frame */
51 ld r11, 112(r1)
52 ld r3, 16(r11)
53
54 bl prepare_ftrace_return
55 nop
56
57 /*
58 * prepare_ftrace_return gives us the address we divert to.
59 * Change the LR in the callers stack frame to this.
60 */
61 ld r11, 112(r1)
62 std r3, 16(r11)
63
64 ld r0, 128(r1)
65 mtlr r0
66 addi r1, r1, 112
67 blr
68#endif /* CONFIG_FUNCTION_GRAPH_TRACER */