Diffstat (limited to 'arch/arm/kernel/entry-common.S')
-rw-r--r--  arch/arm/kernel/entry-common.S  82
1 files changed, 68 insertions, 14 deletions
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f05a35a59694..8bfa98757cd2 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -48,6 +48,8 @@ work_pending:
 	beq	no_work_pending
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
+	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
+	movne	why, #0				@ prevent further restarts
 	bl	do_notify_resume
 	b	ret_slow_syscall		@ Check work again
 
@@ -127,30 +129,58 @@ ENDPROC(ret_from_fork)
  * clobber the ip register. This is OK because the ARM calling convention
  * allows it to be clobbered in subroutines and doesn't use it to hold
  * parameters.)
+ *
+ * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
+ * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
+ * arch/arm/kernel/ftrace.c).
  */
+
+#ifndef CONFIG_OLD_MCOUNT
+#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
+#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
+#endif
+#endif
+
 #ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(mcount)
+ENTRY(__gnu_mcount_nc)
+	mov	ip, lr
+	ldmia	sp!, {lr}
+	mov	pc, ip
+ENDPROC(__gnu_mcount_nc)
+
+ENTRY(ftrace_caller)
 	stmdb	sp!, {r0-r3, lr}
 	mov	r0, lr
 	sub	r0, r0, #MCOUNT_INSN_SIZE
+	ldr	r1, [sp, #20]
 
-	.globl mcount_call
-mcount_call:
+	.global	ftrace_call
+ftrace_call:
 	bl	ftrace_stub
-	ldr	lr, [fp, #-4]			@ restore lr
-	ldmia	sp!, {r0-r3, pc}
+	ldmia	sp!, {r0-r3, ip, lr}
+	mov	pc, ip
+ENDPROC(ftrace_caller)
 
-ENTRY(ftrace_caller)
+#ifdef CONFIG_OLD_MCOUNT
+ENTRY(mcount)
+	stmdb	sp!, {lr}
+	ldr	lr, [fp, #-4]
+	ldmia	sp!, {pc}
+ENDPROC(mcount)
+
+ENTRY(ftrace_caller_old)
 	stmdb	sp!, {r0-r3, lr}
 	ldr	r1, [fp, #-4]
 	mov	r0, lr
 	sub	r0, r0, #MCOUNT_INSN_SIZE
 
-	.globl ftrace_call
-ftrace_call:
+	.globl ftrace_call_old
+ftrace_call_old:
 	bl	ftrace_stub
 	ldr	lr, [fp, #-4]			@ restore lr
 	ldmia	sp!, {r0-r3, pc}
+ENDPROC(ftrace_caller_old)
+#endif
 
 #else
 
@@ -158,7 +188,7 @@ ENTRY(__gnu_mcount_nc)
 	stmdb	sp!, {r0-r3, lr}
 	ldr	r0, =ftrace_trace_function
 	ldr	r2, [r0]
-	adr	r0, ftrace_stub
+	adr	r0, .Lftrace_stub
 	cmp	r0, r2
 	bne	gnu_trace
 	ldmia	sp!, {r0-r3, ip, lr}
@@ -168,11 +198,19 @@ gnu_trace:
 	ldr	r1, [sp, #20]			@ lr of instrumented routine
 	mov	r0, lr
 	sub	r0, r0, #MCOUNT_INSN_SIZE
-	mov	lr, pc
+	adr	lr, BSYM(1f)
 	mov	pc, r2
+1:
 	ldmia	sp!, {r0-r3, ip, lr}
 	mov	pc, ip
+ENDPROC(__gnu_mcount_nc)
 
+#ifdef CONFIG_OLD_MCOUNT
+/*
+ * This is under an ifdef in order to force link-time errors for people trying
+ * to build with !FRAME_POINTER with a GCC which doesn't use the new-style
+ * mcount.
+ */
 ENTRY(mcount)
 	stmdb	sp!, {r0-r3, lr}
 	ldr	r0, =ftrace_trace_function
@@ -191,12 +229,15 @@ trace:
 	mov	pc, r2
 	ldr	lr, [fp, #-4]			@ restore lr
 	ldmia	sp!, {r0-r3, pc}
+ENDPROC(mcount)
+#endif
 
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
-	.globl ftrace_stub
-ftrace_stub:
+ENTRY(ftrace_stub)
+.Lftrace_stub:
 	mov	pc, lr
+ENDPROC(ftrace_stub)
 
 #endif /* CONFIG_FUNCTION_TRACER */
 
@@ -293,7 +334,6 @@ ENTRY(vector_swi)
 
 	get_thread_info tsk
 	adr	tbl, sys_call_table		@ load syscall table pointer
-	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
 
 #if defined(CONFIG_OABI_COMPAT)
 	/*
@@ -310,8 +350,20 @@ ENTRY(vector_swi)
 	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
 #endif
 
+	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
 	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
-	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
+
+#ifdef CONFIG_SECCOMP
+	tst	r10, #_TIF_SECCOMP
+	beq	1f
+	mov	r0, scno
+	bl	__secure_computing
+	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
+	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
+1:
+#endif
+
+	tst	r10, #_TIF_SYSCALL_TRACE	@ are we tracing syscalls?
 	bne	__sys_trace
 
 	cmp	scno, #NR_syscalls		@ check upper syscall limit
@@ -418,11 +470,13 @@ ENDPROC(sys_clone_wrapper)
 
 sys_sigreturn_wrapper:
 		add	r0, sp, #S_OFF
+		mov	why, #0		@ prevent syscall restart handling
 		b	sys_sigreturn
 ENDPROC(sys_sigreturn_wrapper)
 
 sys_rt_sigreturn_wrapper:
 		add	r0, sp, #S_OFF
+		mov	why, #0		@ prevent syscall restart handling
 		b	sys_rt_sigreturn
 ENDPROC(sys_rt_sigreturn_wrapper)
 
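
For context, a minimal sketch (not part of the commit) of the call sites the ftrace stubs above service: with -pg, GCC 4.4+ emits a "push {lr}; bl __gnu_mcount_nc" pair at each function entry, which is why __gnu_mcount_nc pops lr before returning. Per the comment added in this diff, dynamic ftrace patches that bl into "pop {lr}" when tracing is off, and patches the old-style "bl mcount" into "mov r0, r0". The function name below is hypothetical.

	@ hypothetical instrumented function, new-style (__gnu_mcount_nc) ABI
	my_traced_func:
		push	{lr}			@ emitted by GCC for -pg
		bl	__gnu_mcount_nc		@ patched to "pop {lr}" when tracing is off
		...				@ function body follows

	@ old-style (mcount) ABI: the call is made after the frame pointer is set
	@ up, so mcount/ftrace_caller_old recover the saved lr from [fp, #-4]
		bl	mcount			@ patched to "mov r0, r0" when tracing is off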