author     Roland McGrath <roland@redhat.com>        2008-07-09 05:38:07 -0400
committer  Roland McGrath <roland@redhat.com>        2008-07-16 15:15:17 -0400
commit     d4d67150165df8bf1cc05e532f6efca96f907cab (patch)
tree       390d5951231c2a1d97d6453d70c42da7af49eeae /arch/x86/kernel/entry_32.S
parent     64f097331928b01d704047c1dbc738bb6d2a9bf9 (diff)
x86 ptrace: unify syscall tracing
This unifies and cleans up the syscall tracing code on i386 and x86_64.
Using a single function for entry and exit tracing on 32-bit had turned
do_syscall_trace() into some terrible spaghetti. The logic is clearer and
simpler with separate syscall_trace_enter() and syscall_trace_leave()
functions, as on 64-bit.
The unification adds PTRACE_SYSEMU and PTRACE_SYSEMU_SINGLESTEP support
on x86_64, for 32-bit ptrace() callers and for 64-bit ptrace() callers
tracing either 32-bit or 64-bit tasks. It behaves just like 32-bit.
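Not part of the patch, but for context: a minimal user-space sketch of how a
tracer uses PTRACE_SYSEMU to stop the tracee at each syscall entry without
letting the kernel execute the syscall. The fallback value 31 for the request
is the x86 number and is only there in case the libc headers in use do not
define it; the tracee's workload and the loop bound are purely illustrative.

#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/user.h>
#include <sys/wait.h>

#ifndef PTRACE_SYSEMU
#define PTRACE_SYSEMU 31                /* x86 request number (assumption if headers lack it) */
#endif

int main(void)
{
        pid_t child = fork();
        if (child == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);          /* hand control to the tracer */
                syscall(SYS_getpid);     /* will be intercepted, not executed */
                syscall(SYS_getpid);
                _exit(0);                /* also intercepted once SYSEMU is active */
        }

        int status;
        waitpid(child, &status, 0);      /* wait for the initial SIGSTOP */

        for (int i = 0; i < 5 && !WIFEXITED(status); i++) {
                /* Resume; the next stop is at syscall entry, and the syscall is skipped. */
                if (ptrace(PTRACE_SYSEMU, child, NULL, NULL) < 0)
                        break;
                if (waitpid(child, &status, 0) < 0 || !WIFSTOPPED(status))
                        break;

                struct user_regs_struct regs;
                ptrace(PTRACE_GETREGS, child, NULL, &regs);
#ifdef __x86_64__
                printf("syscall entry: nr %lld\n", (long long)regs.orig_rax);
#else
                printf("syscall entry: nr %ld\n", (long)regs.orig_eax);
#endif
        }

        kill(child, SIGKILL);            /* the tracee never really exits under SYSEMU */
        waitpid(child, &status, 0);
        return 0;
}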
Changing syscall_trace_enter() to return the syscall number shortens
all the assembly paths, while adding the SYSEMU feature in a simple way.
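Again not kernel code: a free-standing model of the contract the new assembly
relies on, under the assumption that the entry hook hands back the syscall
number actually to be dispatched, and an out-of-range value when the syscall
should be skipped (which is all SYSEMU needs). The names here (trace_enter,
trace_leave, fake_regs, NR_SYSCALLS) are made up for illustration.

#include <stdio.h>
#include <stdbool.h>

#define NR_SYSCALLS 337                 /* illustrative table size */

struct fake_regs {
        long orig_ax;                   /* syscall number saved at entry */
        long ax;                        /* return value user space will see */
};

static bool sysemu_active;              /* stands in for a TIF_SYSCALL_EMU flag */

/* Entry hook: report to the tracer, return the number to dispatch.
 * Returning -1 makes the unsigned range check below fail, so the syscall
 * is skipped and ax keeps the preloaded -ENOSYS (a SYSEMU tracer would
 * overwrite it with its emulated result). */
static long trace_enter(struct fake_regs *regs)
{
        printf("enter: syscall %ld\n", regs->orig_ax);
        if (sysemu_active)
                return -1;
        return regs->orig_ax;           /* a tracer may also have rewritten it */
}

/* Exit hook: report the result; nothing to return. */
static void trace_leave(struct fake_regs *regs)
{
        printf("leave: result %ld\n", regs->ax);
}

/* The shape the assembly path boils down to after this patch. */
static void dispatch(struct fake_regs *regs)
{
        regs->ax = -38;                 /* -ENOSYS on Linux, until proven otherwise */
        unsigned long nr = (unsigned long)trace_enter(regs);
        if (nr < NR_SYSCALLS)           /* unsigned compare, like jnae in the asm */
                regs->ax = 0;           /* "run" the syscall */
        trace_leave(regs);
}

int main(void)
{
        struct fake_regs regs = { .orig_ax = 39 /* some syscall number */, .ax = 0 };

        dispatch(&regs);                /* normal tracing: syscall runs */
        sysemu_active = true;
        dispatch(&regs);                /* SYSEMU: syscall skipped */
        return 0;
}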
Signed-off-by: Roland McGrath <roland@redhat.com>
Diffstat (limited to 'arch/x86/kernel/entry_32.S')
-rw-r--r--  arch/x86/kernel/entry_32.S  19
1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 0ad987d02b72..cadf73f70d33 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -332,7 +332,7 @@ sysenter_past_esp:
 	GET_THREAD_INFO(%ebp)
 
 	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+	testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
 	jnz syscall_trace_entry
 	cmpl $(nr_syscalls), %eax
 	jae syscall_badsys
@@ -370,7 +370,7 @@ ENTRY(system_call)
 	GET_THREAD_INFO(%ebp)
 					# system call tracing in operation / emulation
 	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+	testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
 	jnz syscall_trace_entry
 	cmpl $(nr_syscalls), %eax
 	jae syscall_badsys
@@ -510,12 +510,8 @@ END(work_pending)
 syscall_trace_entry:
 	movl $-ENOSYS,PT_EAX(%esp)
 	movl %esp, %eax
-	xorl %edx,%edx
-	call do_syscall_trace
-	cmpl $0, %eax
-	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
-					# so must skip actual syscall
-	movl PT_ORIG_EAX(%esp), %eax
+	call syscall_trace_enter
+	/* What it returned is what we'll actually use.  */
 	cmpl $(nr_syscalls), %eax
 	jnae syscall_call
 	jmp syscall_exit
@@ -524,14 +520,13 @@ END(syscall_trace_entry)
 	# perform syscall exit tracing
 	ALIGN
 syscall_exit_work:
-	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
+	testb $_TIF_WORK_SYSCALL_EXIT, %cl
 	jz work_pending
 	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_ANY)	# could let do_syscall_trace() call
+	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
 					# schedule() instead
 	movl %esp, %eax
-	movl $1, %edx
-	call do_syscall_trace
+	call syscall_trace_leave
 	jmp resume_userspace
 END(syscall_exit_work)
 	CFI_ENDPROC
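The grouped flag masks the new tests use are not defined in this file; they
come from the thread_info.h side of this patch, which is not shown here.
Going only by the open-coded tests they replace above, they presumably expand
to something along these lines (a sketch, not the verbatim definitions):

/* Sketch: grouping the same flag sets the old open-coded tests spelled out. */
#define _TIF_WORK_SYSCALL_ENTRY	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | \
				 _TIF_SECCOMP | _TIF_SYSCALL_AUDIT)

#define _TIF_WORK_SYSCALL_EXIT	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
				 _TIF_SINGLESTEP)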