author     Russell King <rmk+kernel@arm.linux.org.uk>   2014-10-28 09:26:48 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2014-11-21 10:25:01 -0500
commit     82112379b73c937576f40c99b4d93162343af6f9 (patch)
tree       52efb9d4ec5adc902753c57a4314ecabd8ba5a49
parent     719c9d1489bad6ff26fa1f6a7e3f760935663398 (diff)
ARM: move ftrace assembly code to separate file
The ftrace assembly code doesn't need to live in entry-common.S and be
surrounded with #ifdef CONFIG_FUNCTION_TRACER.  Instead, move it to its
own file and conditionally assemble it.

Tested-by: Felipe Balbi <balbi@ti.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--  arch/arm/kernel/Makefile        |   1 +
-rw-r--r--  arch/arm/kernel/entry-common.S  | 235 -
-rw-r--r--  arch/arm/kernel/entry-ftrace.S  | 243 +
3 files changed, 244 insertions(+), 235 deletions(-)
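
For context: the code being moved handles the per-function instrumentation that gcc emits under -pg. A minimal sketch of an EABI call site for gcc 4.4+, as described in the header comment of the new entry-ftrace.S below; the label my_func is a placeholder, and the exact sequence depends on compiler version and options:

	my_func:
		push	{lr}
		bl	__gnu_mcount_nc		@ with dynamic ftrace, patched to "pop {lr}"
		...				@ body of my_func follows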
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 38ddd9f83d0e..738ebe5a91e6 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -47,6 +47,7 @@ endif
 obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o
 obj-$(CONFIG_HAVE_ARM_TWD)	+= smp_twd.o
 obj-$(CONFIG_ARM_ARCH_TIMER)	+= arch_timer.o
+obj-$(CONFIG_FUNCTION_TRACER)	+= entry-ftrace.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o insn.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o insn.o
 obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o insn.o patch.o
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 6bb09d4abdea..f8ccc21fa032 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -109,241 +109,6 @@ ENDPROC(ret_from_fork)
 #undef CALL
 #define CALL(x)	.long	x
 
-#ifdef CONFIG_FUNCTION_TRACER
-/*
- * When compiling with -pg, gcc inserts a call to the mcount routine at the
- * start of every function.  In mcount, apart from the function's address (in
- * lr), we need to get hold of the function's caller's address.
- *
- * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
- *
- *	bl	mcount
- *
- * These versions have the limitation that in order for the mcount routine to
- * be able to determine the function's caller's address, an APCS-style frame
- * pointer (which is set up with something like the code below) is required.
- *
- *	mov     ip, sp
- *	push    {fp, ip, lr, pc}
- *	sub     fp, ip, #4
- *
- * With EABI, these frame pointers are not available unless -mapcs-frame is
- * specified, and if building as Thumb-2, not even then.
- *
- * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
- * with call sites like:
- *
- *	push	{lr}
- *	bl	__gnu_mcount_nc
- *
- * With these compilers, frame pointers are not necessary.
- *
- * mcount can be thought of as a function called in the middle of a subroutine
- * call.  As such, it needs to be transparent for both the caller and the
- * callee: the original lr needs to be restored when leaving mcount, and no
- * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
- * clobber the ip register.  This is OK because the ARM calling convention
- * allows it to be clobbered in subroutines and doesn't use it to hold
- * parameters.)
- *
- * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
- * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
- * arch/arm/kernel/ftrace.c).
- */
-
-#ifndef CONFIG_OLD_MCOUNT
-#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
-#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
-#endif
-#endif
-
-.macro mcount_adjust_addr rd, rn
-	bic	\rd, \rn, #1		@ clear the Thumb bit if present
-	sub	\rd, \rd, #MCOUNT_INSN_SIZE
-.endm
-
-.macro __mcount suffix
-	mcount_enter
-	ldr	r0, =ftrace_trace_function
-	ldr	r2, [r0]
-	adr	r0, .Lftrace_stub
-	cmp	r0, r2
-	bne	1f
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	ldr	r1, =ftrace_graph_return
-	ldr	r2, [r1]
-	cmp	r0, r2
-	bne	ftrace_graph_caller\suffix
-
-	ldr	r1, =ftrace_graph_entry
-	ldr	r2, [r1]
-	ldr	r0, =ftrace_graph_entry_stub
-	cmp	r0, r2
-	bne	ftrace_graph_caller\suffix
-#endif
-
-	mcount_exit
-
-1:	mcount_get_lr	r1			@ lr of instrumented func
-	mcount_adjust_addr	r0, lr		@ instrumented function
-	adr	lr, BSYM(2f)
-	mov	pc, r2
-2:	mcount_exit
-.endm
-
-.macro __ftrace_caller suffix
-	mcount_enter
-
-	mcount_get_lr	r1			@ lr of instrumented func
-	mcount_adjust_addr	r0, lr		@ instrumented function
-
-	.globl ftrace_call\suffix
-ftrace_call\suffix:
-	bl	ftrace_stub
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	.globl ftrace_graph_call\suffix
-ftrace_graph_call\suffix:
-	mov	r0, r0
-#endif
-
-	mcount_exit
-.endm
-
-.macro __ftrace_graph_caller
-	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
-#ifdef CONFIG_DYNAMIC_FTRACE
-	@ called from __ftrace_caller, saved in mcount_enter
-	ldr	r1, [sp, #16]		@ instrumented routine (func)
-	mcount_adjust_addr	r1, r1
-#else
-	@ called from __mcount, untouched in lr
-	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
-#endif
-	mov	r2, fp			@ frame pointer
-	bl	prepare_ftrace_return
-	mcount_exit
-.endm
-
-#ifdef CONFIG_OLD_MCOUNT
-/*
- * mcount
- */
-
-.macro mcount_enter
-	stmdb	sp!, {r0-r3, lr}
-.endm
-
-.macro mcount_get_lr reg
-	ldr	\reg, [fp, #-4]
-.endm
-
-.macro mcount_exit
-	ldr	lr, [fp, #-4]
-	ldmia	sp!, {r0-r3, pc}
-.endm
-
-ENTRY(mcount)
-#ifdef CONFIG_DYNAMIC_FTRACE
-	stmdb	sp!, {lr}
-	ldr	lr, [fp, #-4]
-	ldmia	sp!, {pc}
-#else
-	__mcount _old
-#endif
-ENDPROC(mcount)
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(ftrace_caller_old)
-	__ftrace_caller _old
-ENDPROC(ftrace_caller_old)
-#endif
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller_old)
-	__ftrace_graph_caller
-ENDPROC(ftrace_graph_caller_old)
-#endif
-
-.purgem mcount_enter
-.purgem mcount_get_lr
-.purgem mcount_exit
-#endif
-
-/*
- * __gnu_mcount_nc
- */
-
-.macro mcount_enter
-/*
- * This pad compensates for the push {lr} at the call site. Note that we are
- * unable to unwind through a function which does not otherwise save its lr.
- */
- UNWIND(.pad	#4)
-	stmdb	sp!, {r0-r3, lr}
- UNWIND(.save	{r0-r3, lr})
-.endm
-
-.macro mcount_get_lr reg
-	ldr	\reg, [sp, #20]
-.endm
-
-.macro mcount_exit
-	ldmia	sp!, {r0-r3, ip, lr}
-	ret	ip
-.endm
-
-ENTRY(__gnu_mcount_nc)
-UNWIND(.fnstart)
-#ifdef CONFIG_DYNAMIC_FTRACE
-	mov	ip, lr
-	ldmia	sp!, {lr}
-	ret	ip
-#else
-	__mcount
-#endif
-UNWIND(.fnend)
-ENDPROC(__gnu_mcount_nc)
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(ftrace_caller)
-UNWIND(.fnstart)
-	__ftrace_caller
-UNWIND(.fnend)
-ENDPROC(ftrace_caller)
-#endif
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
-UNWIND(.fnstart)
-	__ftrace_graph_caller
-UNWIND(.fnend)
-ENDPROC(ftrace_graph_caller)
-#endif
-
-.purgem mcount_enter
-.purgem mcount_get_lr
-.purgem mcount_exit
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	.globl return_to_handler
-return_to_handler:
-	stmdb	sp!, {r0-r3}
-	mov	r0, fp			@ frame pointer
-	bl	ftrace_return_to_handler
-	mov	lr, r0			@ r0 has real ret addr
-	ldmia	sp!, {r0-r3}
-	ret	lr
-#endif
-
-ENTRY(ftrace_stub)
-.Lftrace_stub:
-	ret	lr
-ENDPROC(ftrace_stub)
-
-#endif /* CONFIG_FUNCTION_TRACER */
-
 /*=============================================================================
  * SWI handler
  *-----------------------------------------------------------------------------
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
new file mode 100644
index 000000000000..fe57c73e70a4
--- /dev/null
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -0,0 +1,243 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/assembler.h>
+#include <asm/ftrace.h>
+#include <asm/unwind.h>
+
+#include "entry-header.S"
+
+/*
+ * When compiling with -pg, gcc inserts a call to the mcount routine at the
+ * start of every function.  In mcount, apart from the function's address (in
+ * lr), we need to get hold of the function's caller's address.
+ *
+ * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
+ *
+ *	bl	mcount
+ *
+ * These versions have the limitation that in order for the mcount routine to
+ * be able to determine the function's caller's address, an APCS-style frame
+ * pointer (which is set up with something like the code below) is required.
+ *
+ *	mov     ip, sp
+ *	push    {fp, ip, lr, pc}
+ *	sub     fp, ip, #4
+ *
+ * With EABI, these frame pointers are not available unless -mapcs-frame is
+ * specified, and if building as Thumb-2, not even then.
+ *
+ * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
+ * with call sites like:
+ *
+ *	push	{lr}
+ *	bl	__gnu_mcount_nc
+ *
+ * With these compilers, frame pointers are not necessary.
+ *
+ * mcount can be thought of as a function called in the middle of a subroutine
+ * call.  As such, it needs to be transparent for both the caller and the
+ * callee: the original lr needs to be restored when leaving mcount, and no
+ * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
+ * clobber the ip register.  This is OK because the ARM calling convention
+ * allows it to be clobbered in subroutines and doesn't use it to hold
+ * parameters.)
+ *
+ * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
+ * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
+ * arch/arm/kernel/ftrace.c).
+ */
+
+#ifndef CONFIG_OLD_MCOUNT
+#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
+#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
+#endif
+#endif
+
+.macro mcount_adjust_addr rd, rn
+	bic	\rd, \rn, #1		@ clear the Thumb bit if present
+	sub	\rd, \rd, #MCOUNT_INSN_SIZE
+.endm
+
+.macro __mcount suffix
+	mcount_enter
+	ldr	r0, =ftrace_trace_function
+	ldr	r2, [r0]
+	adr	r0, .Lftrace_stub
+	cmp	r0, r2
+	bne	1f
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	ldr	r1, =ftrace_graph_return
+	ldr	r2, [r1]
+	cmp	r0, r2
+	bne	ftrace_graph_caller\suffix
+
+	ldr	r1, =ftrace_graph_entry
+	ldr	r2, [r1]
+	ldr	r0, =ftrace_graph_entry_stub
+	cmp	r0, r2
+	bne	ftrace_graph_caller\suffix
+#endif
+
+	mcount_exit
+
+1:	mcount_get_lr	r1			@ lr of instrumented func
+	mcount_adjust_addr	r0, lr		@ instrumented function
+	adr	lr, BSYM(2f)
+	mov	pc, r2
+2:	mcount_exit
+.endm
+
+.macro __ftrace_caller suffix
+	mcount_enter
+
+	mcount_get_lr	r1			@ lr of instrumented func
+	mcount_adjust_addr	r0, lr		@ instrumented function
+
+	.globl ftrace_call\suffix
+ftrace_call\suffix:
+	bl	ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl ftrace_graph_call\suffix
+ftrace_graph_call\suffix:
+	mov	r0, r0
+#endif
+
+	mcount_exit
+.endm
+
+.macro __ftrace_graph_caller
+	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
+#ifdef CONFIG_DYNAMIC_FTRACE
+	@ called from __ftrace_caller, saved in mcount_enter
+	ldr	r1, [sp, #16]		@ instrumented routine (func)
+	mcount_adjust_addr	r1, r1
+#else
+	@ called from __mcount, untouched in lr
+	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
+#endif
+	mov	r2, fp			@ frame pointer
+	bl	prepare_ftrace_return
+	mcount_exit
+.endm
+
+#ifdef CONFIG_OLD_MCOUNT
+/*
+ * mcount
+ */
+
+.macro mcount_enter
+	stmdb	sp!, {r0-r3, lr}
+.endm
+
+.macro mcount_get_lr reg
+	ldr	\reg, [fp, #-4]
+.endm
+
+.macro mcount_exit
+	ldr	lr, [fp, #-4]
+	ldmia	sp!, {r0-r3, pc}
+.endm
+
+ENTRY(mcount)
+#ifdef CONFIG_DYNAMIC_FTRACE
+	stmdb	sp!, {lr}
+	ldr	lr, [fp, #-4]
+	ldmia	sp!, {pc}
+#else
+	__mcount _old
+#endif
+ENDPROC(mcount)
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller_old)
+	__ftrace_caller _old
+ENDPROC(ftrace_caller_old)
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller_old)
+	__ftrace_graph_caller
+ENDPROC(ftrace_graph_caller_old)
+#endif
+
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+#endif
+
+/*
+ * __gnu_mcount_nc
+ */
+
+.macro mcount_enter
+/*
+ * This pad compensates for the push {lr} at the call site. Note that we are
+ * unable to unwind through a function which does not otherwise save its lr.
+ */
+ UNWIND(.pad	#4)
+	stmdb	sp!, {r0-r3, lr}
+ UNWIND(.save	{r0-r3, lr})
+.endm
+
+.macro mcount_get_lr reg
+	ldr	\reg, [sp, #20]
+.endm
+
+.macro mcount_exit
+	ldmia	sp!, {r0-r3, ip, lr}
+	ret	ip
+.endm
+
+ENTRY(__gnu_mcount_nc)
+UNWIND(.fnstart)
+#ifdef CONFIG_DYNAMIC_FTRACE
+	mov	ip, lr
+	ldmia	sp!, {lr}
+	ret	ip
+#else
+	__mcount
+#endif
+UNWIND(.fnend)
+ENDPROC(__gnu_mcount_nc)
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller)
+UNWIND(.fnstart)
+	__ftrace_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_caller)
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+UNWIND(.fnstart)
+	__ftrace_graph_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_graph_caller)
+#endif
+
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl return_to_handler
+return_to_handler:
+	stmdb	sp!, {r0-r3}
+	mov	r0, fp			@ frame pointer
+	bl	ftrace_return_to_handler
+	mov	lr, r0			@ r0 has real ret addr
+	ldmia	sp!, {r0-r3}
+	ret	lr
+#endif
+
+ENTRY(ftrace_stub)
+.Lftrace_stub:
+	ret	lr
+ENDPROC(ftrace_stub)
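
For comparison, a sketch of the older (pre-4.4 gcc) -pg call site served by the CONFIG_OLD_MCOUNT path above; it relies on the APCS frame pointer set up in the function prologue, and with dynamic ftrace the call is patched to "mov r0, r0" (see arch/arm/kernel/ftrace.c). The surrounding code is illustrative only, per the header comment in the file:

		mov	ip, sp
		push	{fp, ip, lr, pc}
		sub	fp, ip, #4
		bl	mcount			@ with dynamic ftrace, patched to "mov r0, r0"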