diff options
author | Steven Rostedt (Red Hat) <rostedt@goodmis.org> | 2014-09-10 10:42:46 -0400 |
---|---|---|
committer | Steven Rostedt <rostedt@goodmis.org> | 2014-09-10 10:48:18 -0400 |
commit | f7aad4e1a8221210db7eb434349cc6fe87aeee8c (patch) | |
tree | 1964991cfecec92b749febb0b58a44400d518b84 /kernel/trace | |
parent | 87354059881ce9315181604dc17076c535f4d744 (diff) |
ftrace: Set callback to ftrace_stub when no ops are registered
The cleanup that added the helper function ftrace_ops_get_func()
caused the default function not to change when DYNAMIC_FTRACE was not
set and no ftrace_ops were registered. Although static tracing is
not very useful (not having DYNAMIC_FTRACE set), it is still supported
and we don't want to break it.
Clean up the if statement even more to specifically have the default
function call ftrace_stub when no ftrace_ops are registered. This
fixes the small bug for static tracing as well as makes the code a
bit more understandable.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r-- | kernel/trace/ftrace.c | 19 |
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index dabf734f909c..708aea493d96 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -254,17 +254,24 @@ static void update_ftrace_function(void) | |||
254 | ftrace_func_t func; | 254 | ftrace_func_t func; |
255 | 255 | ||
256 | /* | 256 | /* |
257 | * Prepare the ftrace_ops that the arch callback will use. | ||
258 | * If there's only one ftrace_ops registered, the ftrace_ops_list | ||
259 | * will point to the ops we want. | ||
260 | */ | ||
261 | set_function_trace_op = ftrace_ops_list; | ||
262 | |||
263 | /* If there's no ftrace_ops registered, just call the stub function */ | ||
264 | if (ftrace_ops_list == &ftrace_list_end) { | ||
265 | func = ftrace_stub; | ||
266 | |||
267 | /* | ||
257 | * If we are at the end of the list and this ops is | 268 | * If we are at the end of the list and this ops is |
258 | * recursion safe and not dynamic and the arch supports passing ops, | 269 | * recursion safe and not dynamic and the arch supports passing ops, |
259 | * then have the mcount trampoline call the function directly. | 270 | * then have the mcount trampoline call the function directly. |
260 | */ | 271 | */ |
261 | if (ftrace_ops_list == &ftrace_list_end || | 272 | } else if (ftrace_ops_list->next == &ftrace_list_end) { |
262 | (ftrace_ops_list->next == &ftrace_list_end)) { | ||
263 | |||
264 | /* Set the ftrace_ops that the arch callback uses */ | ||
265 | set_function_trace_op = ftrace_ops_list; | ||
266 | |||
267 | func = ftrace_ops_get_func(ftrace_ops_list); | 273 | func = ftrace_ops_get_func(ftrace_ops_list); |
274 | |||
268 | } else { | 275 | } else { |
269 | /* Just use the default ftrace_ops */ | 276 | /* Just use the default ftrace_ops */ |
270 | set_function_trace_op = &ftrace_list_end; | 277 | set_function_trace_op = &ftrace_list_end; |