author    Linus Torvalds <torvalds@linux-foundation.org>  2013-12-06 11:34:16 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-12-06 11:34:16 -0500
commit    843f4f4bb1a2c4c196a1af1d18bb6477a580ac78 (patch)
tree      53b795cb810b7430e46fe485d081bab675792ddb
parent    c537aba00e3f1df8ce6c7c9fcb98b82c0c2d1d2c (diff)
parent    3ccb01239201af06a07482ec686b14cd148102a5 (diff)
Merge tag 'trace-fixes-3.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing fix from Steven Rostedt:
 "A regression showed up that there's a large delay when enabling all
  events. This was prevalent when FTRACE_SELFTEST was enabled which
  enables all events several times, and caused the system bootup to
  pause for over a minute.

  This was tracked down to an addition of a synchronize_sched()
  performed when system call tracepoints are unregistered.

  The synchronize_sched() is needed between the unregistering of the
  system call tracepoint and a deletion of a tracing instance buffer.
  But placing the synchronize_sched() in the unreg of *every* system
  call tracepoint is a bit overboard. A single synchronize_sched()
  before the deletion of the instance is sufficient"

* tag 'trace-fixes-3.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Only run synchronize_sched() at instance deletion time
-rw-r--r--  kernel/trace/trace_events.c   |  3 +++
-rw-r--r--  kernel/trace/trace_syscalls.c | 10 ----------
2 files changed, 3 insertions(+), 10 deletions(-)
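The pattern is easier to see outside the kernel. Below is a minimal userspace sketch of the same idea using liburcu: synchronize_sched() is kernel-only, so synchronize_rcu() stands in for it, and the events[] array, struct handler, and the function names are illustrative stand-ins for the kernel's event structures, not its real API.

/*
 * Minimal sketch of the pattern in this pull, assuming liburcu is
 * installed.  Build: gcc -O2 sketch.c -o sketch -lurcu -lpthread
 */
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>

#define NR_EVENTS 64            /* illustrative count, not a kernel value */

struct handler {                /* hypothetical stand-in for an event */
        void (*fn)(void);
};

static struct handler *events[NR_EVENTS];

/* Reader side: handlers run under an RCU read-side critical section,
 * like the kernel's callbacks run under rcu_read_lock_sched(). */
static void call_event(int i)
{
        struct handler *h;

        rcu_read_lock();
        h = rcu_dereference(events[i]);
        if (h)
                h->fn();
        rcu_read_unlock();
}

/* Old scheme: every single unregister paid a full grace period, so
 * disabling all events cost NR_EVENTS grace periods. */
static void unreg_event_slow(int i)
{
        struct handler *h = events[i];

        rcu_assign_pointer(events[i], NULL);
        synchronize_rcu();              /* wait for in-flight readers */
        free(h);
}

/* Fixed scheme: unregister only unpublishes the pointer... */
static void unreg_event_fast(int i, struct handler **dead)
{
        dead[i] = events[i];
        rcu_assign_pointer(events[i], NULL);
}

/* ...and instance deletion pays one grace period for all of them. */
static void delete_instance(void)
{
        struct handler *dead[NR_EVENTS];
        int i;

        for (i = 0; i < NR_EVENTS; i++)
                unreg_event_fast(i, dead);
        synchronize_rcu();              /* single wait covers every event */
        for (i = 0; i < NR_EVENTS; i++)
                free(dead[i]);          /* no reader can still see these */
}

static void hello(void) { puts("event fired"); }

int main(void)
{
        int i;

        rcu_register_thread();
        for (i = 0; i < NR_EVENTS; i++) {
                events[i] = malloc(sizeof(struct handler));
                events[i]->fn = hello;
        }
        call_event(0);
        unreg_event_slow(0);            /* one event the old, slow way */
        delete_instance();              /* the rest with a single wait */
        rcu_unregister_thread();
        return 0;
}

The design point: unpublishing the pointer is cheap and can happen per event, while the expensive grace-period wait only needs to happen once, immediately before the memory is torn down.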
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f919a2e21bf3..a11800ae96de 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2314,6 +2314,9 @@ int event_trace_del_tracer(struct trace_array *tr)
 	/* Disable any running events */
 	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
 
+	/* Access to events are within rcu_read_lock_sched() */
+	synchronize_sched();
+
 	down_write(&trace_event_sem);
 	__trace_remove_event_dirs(tr);
 	debugfs_remove_recursive(tr->event_dir);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index e4b6d11bdf78..ea90eb5f6f17 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -431,11 +431,6 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
 	if (!tr->sys_refcount_enter)
 		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
 	mutex_unlock(&syscall_trace_lock);
-	/*
-	 * Callers expect the event to be completely disabled on
-	 * return, so wait for current handlers to finish.
-	 */
-	synchronize_sched();
 }
 
 static int reg_event_syscall_exit(struct ftrace_event_file *file,
@@ -474,11 +469,6 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
 	if (!tr->sys_refcount_exit)
 		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
 	mutex_unlock(&syscall_trace_lock);
-	/*
-	 * Callers expect the event to be completely disabled on
-	 * return, so wait for current handlers to finish.
-	 */
-	synchronize_sched();
 }
 
 static int __init init_syscall_trace(struct ftrace_event_call *call)
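
For a rough feel of why the per-unregister wait was so visible at boot: a grace period has a fixed latency, and the old code paid one per syscall event, multiplied by the several enable/disable-all passes FTRACE_SELFTEST performs. The liburcu sketch below only demonstrates the proportional scaling; the count of 300 is an illustrative stand-in for "all syscall events", and userspace grace periods are far cheaper than a kernel synchronize_sched().

/*
 * Timing sketch, assuming liburcu.
 * Build: gcc -O2 timing.c -o timing -lurcu -lpthread
 */
#include <stdio.h>
#include <time.h>
#include <urcu.h>

static double now(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec + ts.tv_nsec / 1e9;
}

int main(void)
{
        enum { N = 300 };       /* stand-in for "all syscall events" */
        double t;
        int i;

        rcu_register_thread();

        t = now();
        for (i = 0; i < N; i++)
                synchronize_rcu();      /* old: one wait per unregister */
        printf("%d grace periods: %f s\n", N, now() - t);

        t = now();
        synchronize_rcu();              /* new: one wait at instance deletion */
        printf("1 grace period:   %f s\n", now() - t);

        rcu_unregister_thread();
        return 0;
}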