author | Steven Rostedt <rostedt@goodmis.org> | 2013-12-03 12:41:20 -0500
---|---|---
committer | Steven Rostedt <rostedt@goodmis.org> | 2013-12-05 14:22:30 -0500
commit | 3ccb01239201af06a07482ec686b14cd148102a5 (patch) |
tree | 5ad078061e71dcf23cd1ee3f4ca353060c185324 /kernel/trace/trace_syscalls.c |
parent | dc1ccc48159d63eca5089e507c82c7d22ef60839 (diff) |
tracing: Only run synchronize_sched() at instance deletion time
It has been reported that booting with FTRACE_SELFTEST enabled can take a
very long time. There can be stalls of over a minute.
This was tracked down to the synchronize_sched() called when a system call
event is disabled. As the self tests enable and disable thousands of events,
synchronize_sched() ends up being called thousands of times.
The synchronize_sched() was added with commit d562aff93bfb53 "tracing: Add support
for SOFT_DISABLE to syscall events", which caused this regression (introduced
in 3.13-rc1).
The synchronize_sched() is there to protect against the events being accessed
while a tracer instance is being deleted. When an instance is deleted,
all the events associated with it are unregistered. The synchronize_sched()
makes sure that no users are still running when it finishes.
Instead of calling synchronize_sched() for all syscall events, we only
need to call it once, after the events are unregistered and before the
instance is deleted. The event_mutex is held during this action to
prevent new users from enabling events.
Link: http://lkml.kernel.org/r/20131203124120.427b9661@gandalf.local.home
Reported-by: Petr Mladek <pmladek@suse.cz>
Acked-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Acked-by: Petr Mladek <pmladek@suse.cz>
Tested-by: Petr Mladek <pmladek@suse.cz>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
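The diff below only removes the per-event synchronize_sched() calls; the single
wait described in the commit message is issued from the instance-deletion path
instead (that part of the commit falls outside this diffstat, which is limited
to trace_syscalls.c). As a rough illustration of the idea, and not the actual
kernel code, here is a sketch using hypothetical helper names:

```c
/*
 * Illustrative sketch only -- hypothetical helper names, not the actual
 * kernel code.  The single synchronize_sched() is paid once per instance
 * deletion, after every event of the instance has been unregistered,
 * instead of once per syscall event in unreg_event_syscall_enter()/_exit().
 */
static void delete_trace_instance(struct trace_array *tr)
{
	mutex_lock(&event_mutex);		/* prevent new users from enabling events */

	unregister_instance_events(tr);		/* hypothetical: unregister all events of tr */

	/*
	 * Event handlers run under rcu_read_lock_sched(); wait once for
	 * all of them to finish before the instance and its events go away.
	 */
	synchronize_sched();

	free_instance(tr);			/* hypothetical cleanup */

	mutex_unlock(&event_mutex);
}
```

The point is that the RCU-sched grace period is paid once per deleted instance
rather than once per syscall event, while event_mutex keeps new users from
enabling events during teardown.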
Diffstat (limited to 'kernel/trace/trace_syscalls.c')
-rw-r--r-- | kernel/trace/trace_syscalls.c | 10
1 file changed, 0 insertions, 10 deletions
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index e4b6d11bdf78..ea90eb5f6f17 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -431,11 +431,6 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
 	if (!tr->sys_refcount_enter)
 		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
 	mutex_unlock(&syscall_trace_lock);
-	/*
-	 * Callers expect the event to be completely disabled on
-	 * return, so wait for current handlers to finish.
-	 */
-	synchronize_sched();
 }
 
 static int reg_event_syscall_exit(struct ftrace_event_file *file,
@@ -474,11 +469,6 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
 	if (!tr->sys_refcount_exit)
 		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
 	mutex_unlock(&syscall_trace_lock);
-	/*
-	 * Callers expect the event to be completely disabled on
-	 * return, so wait for current handlers to finish.
-	 */
-	synchronize_sched();
 }
 
 static int __init init_syscall_trace(struct ftrace_event_call *call)