path: root/kernel/trace/trace_functions.c
author		Steven Rostedt <srostedt@redhat.com>	2009-01-15 22:21:43 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-16 06:17:46 -0500
commit		3eb36aa05329a47cbe201c151fd0024a4a3649cd
tree		305b4805a32b47387dc6f76161c20c9e72280e94 /kernel/trace/trace_functions.c
parent		5e4abc9839191e213965e0f1dbf36e2e44356c3a
ftrace: combine stack trace in function call
Impact: less likely to interleave function and stack traces

This patch replaces the separate stack-trace record taken on each function
call with a single step that records the function and its stack trace
together, and it switches between function-only recording and combined
function-plus-stack recording depending on the stack-trace option. Some
whitespace fixups are included as well.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
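For orientation, a minimal sketch of what the recording step in
function_stack_trace_call() looks like after this patch (the full change is
in the diff below). The locals (tr, data, flags, pc) and the surrounding
disable/enable logic are the existing ones from that callback; the
__trace_stack() call and its skip count of 5 are inferred from the comment
in the first hunk, so treat them as illustrative rather than a verbatim copy
of the resulting code:

	/* one writer section: function entry and its stack go in back to back */
	if (likely(disabled == 1)) {
		pc = preempt_count();
		/* record the function entry first ... */
		trace_function(tr, data, ip, parent_ip, flags, pc);
		/*
		 * ... then immediately record the stack, skipping the
		 * tracing internals (5 frames) at the top of the stack.
		 */
		__trace_stack(tr, data, flags, 5, pc);
	}

Because both records are emitted while the per-cpu "disabled" counter holds
off recursion, other events are much less likely to land between the
function entry and its stack trace in the ring buffer.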
Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r--	kernel/trace/trace_functions.c	61
1 file changed, 36 insertions(+), 25 deletions(-)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 2dce3c7370d1..61d0b73dabf5 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -133,6 +133,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
+		trace_function(tr, data, ip, parent_ip, flags, pc);
 		/*
 		 * skip over 5 funcs:
 		 *    __ftrace_trace_stack,
@@ -154,24 +155,6 @@ static struct ftrace_ops trace_ops __read_mostly =
 	.func = function_trace_call,
 };
 
-void tracing_start_function_trace(void)
-{
-	ftrace_function_enabled = 0;
-
-	if (trace_flags & TRACE_ITER_PREEMPTONLY)
-		trace_ops.func = function_trace_call_preempt_only;
-	else
-		trace_ops.func = function_trace_call;
-
-	register_ftrace_function(&trace_ops);
-	ftrace_function_enabled = 1;
-}
-
-void tracing_stop_function_trace(void)
-{
-	ftrace_function_enabled = 0;
-	unregister_ftrace_function(&trace_ops);
-}
 static struct ftrace_ops trace_stack_ops __read_mostly =
 {
 	.func = function_stack_trace_call,
@@ -194,6 +177,31 @@ static struct tracer_flags func_flags = {
 	.opts = func_opts
 };
 
+void tracing_start_function_trace(void)
+{
+	ftrace_function_enabled = 0;
+
+	if (trace_flags & TRACE_ITER_PREEMPTONLY)
+		trace_ops.func = function_trace_call_preempt_only;
+	else
+		trace_ops.func = function_trace_call;
+
+	if (func_flags.val & TRACE_FUNC_OPT_STACK)
+		register_ftrace_function(&trace_stack_ops);
+	else
+		register_ftrace_function(&trace_ops);
+
+	ftrace_function_enabled = 1;
+}
+
+void tracing_stop_function_trace(void)
+{
+	ftrace_function_enabled = 0;
+	/* OK if they are not registered */
+	unregister_ftrace_function(&trace_stack_ops);
+	unregister_ftrace_function(&trace_ops);
+}
+
 static int func_set_flag(u32 old_flags, u32 bit, int set)
 {
 	if (bit == TRACE_FUNC_OPT_STACK) {
@@ -201,10 +209,13 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
 		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
 			return 0;
 
-		if (set)
+		if (set) {
+			unregister_ftrace_function(&trace_ops);
 			register_ftrace_function(&trace_stack_ops);
-		else
+		} else {
 			unregister_ftrace_function(&trace_stack_ops);
+			register_ftrace_function(&trace_ops);
+		}
 
 		return 0;
 	}
@@ -214,14 +225,14 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
 
 static struct tracer function_trace __read_mostly =
 {
 	.name		= "function",
 	.init		= function_trace_init,
 	.reset		= function_trace_reset,
 	.start		= function_trace_start,
 	.flags		= &func_flags,
 	.set_flag	= func_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_function,
 #endif
 };
 