Diffstat (limited to 'arch/arm/kernel/entry-common.S')
 -rw-r--r--  arch/arm/kernel/entry-common.S |  78
 1 files changed, 64 insertions, 14 deletions
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 7885722bdf4e..8bfa98757cd2 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -129,30 +129,58 @@ ENDPROC(ret_from_fork)
  * clobber the ip register. This is OK because the ARM calling convention
  * allows it to be clobbered in subroutines and doesn't use it to hold
  * parameters.)
+ *
+ * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
+ * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
+ * arch/arm/kernel/ftrace.c).
  */
+
+#ifndef CONFIG_OLD_MCOUNT
+#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
+#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
+#endif
+#endif
+
 #ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(mcount)
+ENTRY(__gnu_mcount_nc)
+	mov ip, lr
+	ldmia sp!, {lr}
+	mov pc, ip
+ENDPROC(__gnu_mcount_nc)
+
+ENTRY(ftrace_caller)
 	stmdb sp!, {r0-r3, lr}
 	mov r0, lr
 	sub r0, r0, #MCOUNT_INSN_SIZE
+	ldr r1, [sp, #20]
 
-	.globl mcount_call
-mcount_call:
+	.global ftrace_call
+ftrace_call:
 	bl ftrace_stub
-	ldr lr, [fp, #-4]		@ restore lr
-	ldmia sp!, {r0-r3, pc}
+	ldmia sp!, {r0-r3, ip, lr}
+	mov pc, ip
+ENDPROC(ftrace_caller)
 
-ENTRY(ftrace_caller)
+#ifdef CONFIG_OLD_MCOUNT
+ENTRY(mcount)
+	stmdb sp!, {lr}
+	ldr lr, [fp, #-4]
+	ldmia sp!, {pc}
+ENDPROC(mcount)
+
+ENTRY(ftrace_caller_old)
 	stmdb sp!, {r0-r3, lr}
 	ldr r1, [fp, #-4]
 	mov r0, lr
 	sub r0, r0, #MCOUNT_INSN_SIZE
 
-	.globl ftrace_call
-ftrace_call:
+	.globl ftrace_call_old
+ftrace_call_old:
 	bl ftrace_stub
 	ldr lr, [fp, #-4]		@ restore lr
 	ldmia sp!, {r0-r3, pc}
+ENDPROC(ftrace_caller_old)
+#endif
 
 #else
 
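For context on the rewritten CONFIG_DYNAMIC_FTRACE block above: what gets patched at runtime are the call sites emitted by GCC's -pg instrumentation, and the new comment at the top of the hunk names the no-op forms they are patched out to. Below is a sketch, not text from this patch, of what those call sites typically look like; "func" is a placeholder and the exact prologue depends on compiler version and flags.

	@ New-style EABI profiling call (GCC >= 4.4); no frame pointer required:
	func:
		push	{lr}
		bl	__gnu_mcount_nc		@ patched out to "pop {lr}" while tracing is disabled

	@ Old-style mcount (CONFIG_OLD_MCOUNT); relies on the APCS frame pointer:
	func:
		mov	ip, sp
		push	{fp, ip, lr, pc}
		sub	fp, ip, #4
		bl	mcount			@ patched out to "mov r0, r0" (a NOP) while tracing is disabled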
@@ -160,7 +188,7 @@ ENTRY(__gnu_mcount_nc)
 	stmdb sp!, {r0-r3, lr}
 	ldr r0, =ftrace_trace_function
 	ldr r2, [r0]
-	adr r0, ftrace_stub
+	adr r0, .Lftrace_stub
 	cmp r0, r2
 	bne gnu_trace
 	ldmia sp!, {r0-r3, ip, lr}
@@ -170,11 +198,19 @@ gnu_trace:
 	ldr r1, [sp, #20]		@ lr of instrumented routine
 	mov r0, lr
 	sub r0, r0, #MCOUNT_INSN_SIZE
-	mov lr, pc
+	adr lr, BSYM(1f)
 	mov pc, r2
+1:
 	ldmia sp!, {r0-r3, ip, lr}
 	mov pc, ip
+ENDPROC(__gnu_mcount_nc)
 
+#ifdef CONFIG_OLD_MCOUNT
+/*
+ * This is under an ifdef in order to force link-time errors for people trying
+ * to build with !FRAME_POINTER with a GCC which doesn't use the new-style
+ * mcount.
+ */
 ENTRY(mcount)
 	stmdb sp!, {r0-r3, lr}
 	ldr r0, =ftrace_trace_function
@@ -193,12 +229,15 @@ trace:
 	mov pc, r2
 	ldr lr, [fp, #-4]		@ restore lr
 	ldmia sp!, {r0-r3, pc}
+ENDPROC(mcount)
+#endif
 
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
-	.globl ftrace_stub
-ftrace_stub:
+ENTRY(ftrace_stub)
+.Lftrace_stub:
 	mov pc, lr
+ENDPROC(ftrace_stub)
 
 #endif /* CONFIG_FUNCTION_TRACER */
 
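A note on the __gnu_mcount_nc/ftrace_caller entry sequence used in the hunks above: the instrumented routine has already pushed its lr before branching here, and the handler then saves {r0-r3, lr}, which is why the caller's lr is picked up at [sp, #20]. The layout below is a sketch derived from those two pushes, not text from the patch.

	@ Stack layout after "stmdb sp!, {r0-r3, lr}":
	@
	@   sp + 20:  lr of the instrumented routine's caller  (from the "push {lr}" at the call site)
	@   sp + 16:  lr at the bl, i.e. the return point into the instrumented routine
	@   sp + 12:  r3
	@   sp +  8:  r2
	@   sp +  4:  r1
	@   sp +  0:  r0
	@
	@ "ldmia sp!, {r0-r3, ip, lr}" restores the scratch registers, loads the
	@ return point into ip and the caller's lr back into lr, and "mov pc, ip"
	@ then resumes the instrumented routine with both values intact.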
@@ -295,7 +334,6 @@ ENTRY(vector_swi)
 
 	get_thread_info tsk
 	adr tbl, sys_call_table		@ load syscall table pointer
-	ldr ip, [tsk, #TI_FLAGS]	@ check for syscall tracing
 
 #if defined(CONFIG_OABI_COMPAT)
 	/*
@@ -312,8 +350,20 @@ ENTRY(vector_swi)
 	eor scno, scno, #__NR_SYSCALL_BASE	@ check OS number
 #endif
 
+	ldr r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
 	stmdb sp!, {r4, r5}			@ push fifth and sixth args
-	tst ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
+
+#ifdef CONFIG_SECCOMP
+	tst r10, #_TIF_SECCOMP
+	beq 1f
+	mov r0, scno
+	bl __secure_computing
+	add r0, sp, #S_R0 + S_OFF		@ pointer to regs
+	ldmia r0, {r0 - r3}			@ have to reload r0 - r3
+1:
+#endif
+
+	tst r10, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
 	bne __sys_trace
 
 	cmp scno, #NR_syscalls			@ check upper syscall limit
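On the final vector_swi hunk: the thread-info flags are now loaded into r10 rather than ip, presumably because the optional call to __secure_computing may clobber ip and r0-r3 under the ARM calling convention, which is also why r0-r3 are reloaded from the saved registers at S_R0 + S_OFF, while r10 survives the call and can still be tested for _TIF_SYSCALL_TRACE afterwards. A comment-only sketch of the resulting flow, using the names from the hunk:

	@   ldr   r10, [tsk, #TI_FLAGS]	@ flags kept in a call-preserved register
	@   stmdb sp!, {r4, r5}		@ push fifth and sixth syscall args
	@   if CONFIG_SECCOMP and (r10 & _TIF_SECCOMP):
	@       r0 = scno; bl __secure_computing
	@       reload r0-r3 from the saved pt_regs at S_R0 + S_OFF
	@   if (r10 & _TIF_SYSCALL_TRACE):
	@       b __sys_trace		@ otherwise fall through to the fast path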