| author | Ingo Molnar <mingo@kernel.org> | 2013-12-17 09:27:08 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2013-12-17 09:27:08 -0500 |
| commit | bb799d3b980eb803ca2da4a4eefbd9308f8d988a (patch) | |
| tree | 69fbe0cd6d47b23a50f5e1d87bf7489532fae149 /kernel/trace/trace_syscalls.c | |
| parent | 919fc6e34831d1c2b58bfb5ae261dc3facc9b269 (diff) | |
| parent | 319e2e3f63c348a9b66db4667efa73178e18b17d (diff) | |
Merge tag 'v3.13-rc4' into core/locking
Merge Linux 3.13-rc4, to refresh this rather old tree with the latest fixes.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/trace/trace_syscalls.c')
 kernel/trace/trace_syscalls.c | 10 ----------
1 file changed, 0 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index e4b6d11bdf78..ea90eb5f6f17 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -431,11 +431,6 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
 	if (!tr->sys_refcount_enter)
 		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
 	mutex_unlock(&syscall_trace_lock);
-	/*
-	 * Callers expect the event to be completely disabled on
-	 * return, so wait for current handlers to finish.
-	 */
-	synchronize_sched();
 }
 
 static int reg_event_syscall_exit(struct ftrace_event_file *file,
@@ -474,11 +469,6 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
 	if (!tr->sys_refcount_exit)
 		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
 	mutex_unlock(&syscall_trace_lock);
-	/*
-	 * Callers expect the event to be completely disabled on
-	 * return, so wait for current handlers to finish.
-	 */
-	synchronize_sched();
 }
 
 static int __init init_syscall_trace(struct ftrace_event_call *call)
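For context, the deleted hunks drop a classic RCU teardown idiom: unhook a probe under a mutex, then call synchronize_sched() so that any handler already executing on another CPU finishes before the function returns. The removed comment states the intent directly ("Callers expect the event to be completely disabled on return, so wait for current handlers to finish"). Below is a minimal sketch of that idiom in isolation; the struct, lock, and unhook_probe() names are hypothetical illustrations, not kernel APIs, and this is not the kernel's actual code:

```c
#include <linux/mutex.h>
#include <linux/rcupdate.h>

/* Hypothetical tracer state used only for this sketch. */
struct my_tracer {
	int refcount;
};

static DEFINE_MUTEX(probe_lock);

static void unhook_probe(struct my_tracer *tr);	/* hypothetical helper */

static void sketch_unreg_probe(struct my_tracer *tr)
{
	/* Drop a reference; detach the handler when the last user goes away. */
	mutex_lock(&probe_lock);
	tr->refcount--;
	if (!tr->refcount)
		unhook_probe(tr);
	mutex_unlock(&probe_lock);

	/*
	 * Tracepoint handlers run with preemption disabled, so waiting
	 * for an RCU-sched grace period guarantees that every handler
	 * which started before the unhook has completed by the time we
	 * return.  (In later kernels synchronize_sched() was folded
	 * into synchronize_rcu().)
	 */
	synchronize_sched();
}
```

Dropping the per-event wait means the "fully disabled on return" guarantee must be provided somewhere further up the event-disable path rather than once per syscall event; the diff shown here does not include where that responsibility moved, so treat the sketch as background on the removed pattern, not as a description of the replacement.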
