diff options
author | Steven Rostedt (Red Hat) <rostedt@goodmis.org> | 2013-03-14 12:10:40 -0400 |
---|---|---|
committer | Steven Rostedt <rostedt@goodmis.org> | 2013-03-15 00:36:08 -0400 |
commit | 328df4759c03e2c3e7429cc6cb0e180c38f32063 (patch) | |
tree | 874c0aa7642cfe8ce1e4512464eae1d33e99391b /kernel/trace/trace_sched_wakeup.c | |
parent | 4df297129f622bdc18935c856f42b9ddd18f9f28 (diff) |
tracing: Add function-trace option to disable function tracing of latency tracers
Currently, the only way to stop the latency tracers from doing function
tracing is to fully disable the function tracer from the proc file
system:
echo 0 > /proc/sys/kernel/ftrace_enabled
This is a big hammer approach as it disables function tracing for
all users. This includes kprobes, perf, stack tracer, etc.
Instead, create a function-trace option that the latency tracers can
check to determine whether they should enable function tracing or not.
This option can be set or cleared even while the tracer is active,
and the tracers will disable or enable function tracing depending
on how the option was set.
Instead of using the proc file, disable latency function tracing with
echo 0 > /debug/tracing/options/function-trace
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Clark Williams <williams@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace_sched_wakeup.c')
-rw-r--r-- | kernel/trace/trace_sched_wakeup.c | 63 |
1 files changed, 53 insertions, 10 deletions
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index c16f8cd63c3c..fee77e15d815 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -37,6 +37,7 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace); | |||
37 | static void wakeup_graph_return(struct ftrace_graph_ret *trace); | 37 | static void wakeup_graph_return(struct ftrace_graph_ret *trace); |
38 | 38 | ||
39 | static int save_flags; | 39 | static int save_flags; |
40 | static bool function_enabled; | ||
40 | 41 | ||
41 | #define TRACE_DISPLAY_GRAPH 1 | 42 | #define TRACE_DISPLAY_GRAPH 1 |
42 | 43 | ||
@@ -134,15 +135,60 @@ static struct ftrace_ops trace_ops __read_mostly = | |||
134 | }; | 135 | }; |
135 | #endif /* CONFIG_FUNCTION_TRACER */ | 136 | #endif /* CONFIG_FUNCTION_TRACER */ |
136 | 137 | ||
137 | static int start_func_tracer(int graph) | 138 | static int register_wakeup_function(int graph, int set) |
138 | { | 139 | { |
139 | int ret; | 140 | int ret; |
140 | 141 | ||
141 | if (!graph) | 142 | /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */ |
142 | ret = register_ftrace_function(&trace_ops); | 143 | if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION))) |
143 | else | 144 | return 0; |
145 | |||
146 | if (graph) | ||
144 | ret = register_ftrace_graph(&wakeup_graph_return, | 147 | ret = register_ftrace_graph(&wakeup_graph_return, |
145 | &wakeup_graph_entry); | 148 | &wakeup_graph_entry); |
149 | else | ||
150 | ret = register_ftrace_function(&trace_ops); | ||
151 | |||
152 | if (!ret) | ||
153 | function_enabled = true; | ||
154 | |||
155 | return ret; | ||
156 | } | ||
157 | |||
158 | static void unregister_wakeup_function(int graph) | ||
159 | { | ||
160 | if (!function_enabled) | ||
161 | return; | ||
162 | |||
163 | if (graph) | ||
164 | unregister_ftrace_graph(); | ||
165 | else | ||
166 | unregister_ftrace_function(&trace_ops); | ||
167 | |||
168 | function_enabled = false; | ||
169 | } | ||
170 | |||
171 | static void wakeup_function_set(int set) | ||
172 | { | ||
173 | if (set) | ||
174 | register_wakeup_function(is_graph(), 1); | ||
175 | else | ||
176 | unregister_wakeup_function(is_graph()); | ||
177 | } | ||
178 | |||
179 | static int wakeup_flag_changed(struct tracer *tracer, u32 mask, int set) | ||
180 | { | ||
181 | if (mask & TRACE_ITER_FUNCTION) | ||
182 | wakeup_function_set(set); | ||
183 | |||
184 | return trace_keep_overwrite(tracer, mask, set); | ||
185 | } | ||
186 | |||
187 | static int start_func_tracer(int graph) | ||
188 | { | ||
189 | int ret; | ||
190 | |||
191 | ret = register_wakeup_function(graph, 0); | ||
146 | 192 | ||
147 | if (!ret && tracing_is_enabled()) | 193 | if (!ret && tracing_is_enabled()) |
148 | tracer_enabled = 1; | 194 | tracer_enabled = 1; |
@@ -156,10 +202,7 @@ static void stop_func_tracer(int graph) | |||
156 | { | 202 | { |
157 | tracer_enabled = 0; | 203 | tracer_enabled = 0; |
158 | 204 | ||
159 | if (!graph) | 205 | unregister_wakeup_function(graph); |
160 | unregister_ftrace_function(&trace_ops); | ||
161 | else | ||
162 | unregister_ftrace_graph(); | ||
163 | } | 206 | } |
164 | 207 | ||
165 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 208 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
@@ -600,7 +643,7 @@ static struct tracer wakeup_tracer __read_mostly = | |||
600 | .print_line = wakeup_print_line, | 643 | .print_line = wakeup_print_line, |
601 | .flags = &tracer_flags, | 644 | .flags = &tracer_flags, |
602 | .set_flag = wakeup_set_flag, | 645 | .set_flag = wakeup_set_flag, |
603 | .flag_changed = trace_keep_overwrite, | 646 | .flag_changed = wakeup_flag_changed, |
604 | #ifdef CONFIG_FTRACE_SELFTEST | 647 | #ifdef CONFIG_FTRACE_SELFTEST |
605 | .selftest = trace_selftest_startup_wakeup, | 648 | .selftest = trace_selftest_startup_wakeup, |
606 | #endif | 649 | #endif |
@@ -622,7 +665,7 @@ static struct tracer wakeup_rt_tracer __read_mostly = | |||
622 | .print_line = wakeup_print_line, | 665 | .print_line = wakeup_print_line, |
623 | .flags = &tracer_flags, | 666 | .flags = &tracer_flags, |
624 | .set_flag = wakeup_set_flag, | 667 | .set_flag = wakeup_set_flag, |
625 | .flag_changed = trace_keep_overwrite, | 668 | .flag_changed = wakeup_flag_changed, |
626 | #ifdef CONFIG_FTRACE_SELFTEST | 669 | #ifdef CONFIG_FTRACE_SELFTEST |
627 | .selftest = trace_selftest_startup_wakeup, | 670 | .selftest = trace_selftest_startup_wakeup, |
628 | #endif | 671 | #endif |