| author | Steven Rostedt <srostedt@redhat.com> | 2011-07-13 15:11:02 -0400 |
|---|---|---|
| committer | Steven Rostedt <rostedt@goodmis.org> | 2011-07-13 22:25:09 -0400 |
| commit | 6331c28c962561aee59e5a493b7556a4bb585957 (patch) | |
| tree | 33292a3127aa831ee8ec2394e790bc2a8817beee /kernel/trace | |
| parent | 072126f4529196f71a97960248bca54fd4554c2d (diff) | |
ftrace: Fix dynamic selftest failure on some archs
Archs that do not implement CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST will
fail the dynamic ftrace selftest.
The function tracer has a quick 'off' variable that will prevent
the callback functions from being called. This variable is called
function_trace_stop. On x86, the check is implemented directly in the
mcount assembly, but other archs go through an intermediate function
called ftrace_test_stop_func().
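On those archs the test lives in C rather than in the mcount assembly. A
minimal sketch of that intermediate function, based on the kernel/trace/ftrace.c
implementation of this era (simplified, not a verbatim copy):

static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
        /* quick 'off' switch: skip the callback while tracing is stopped */
        if (function_trace_stop)
                return;

        __ftrace_trace_function(ip, parent_ip);
}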
In dynamic ftrace, the function pointer variable ftrace_trace_function is
used to update the call site in the mcount caller. But on archs that do
not have CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST set, the mcount caller only
ever calls ftrace_test_stop_func(), which in turn calls __ftrace_trace_function.
When more than one ftrace_ops is registered, the function called is
ftrace_ops_list_func(), which iterates over all registered ftrace_ops
and calls the callbacks whose hashes match the function being traced.
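A simplified sketch of that list walk (the real ftrace_ops_list_func() also
handles preemption and the RCU lifetime of dynamically allocated ops; treat
this as illustrative only):

static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_ops_list;

        /* walk every registered ops and fire the ones whose hash matches ip */
        while (op != &ftrace_list_end) {
                if (ftrace_ops_test(op, ip))
                        op->func(ip, parent_ip);
                op = op->next;
        }
}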
The issue happens when two ftrace_ops are registered for different functions
and one is then unregistered. The __ftrace_trace_function is then pointed
directly at the remaining ftrace_ops callback. This means it will be called
for all the functions that either of the two registered ftrace_ops had been
set to trace.
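A hypothetical sequence that triggers it (the callback and ops names below
are made up, and the per-ops filter setup is omitted):

#include <linux/ftrace.h>

static void cb_a(unsigned long ip, unsigned long parent_ip) { /* wants func_a only */ }
static void cb_b(unsigned long ip, unsigned long parent_ip) { /* wants func_b only */ }

static struct ftrace_ops ops_a __read_mostly = { .func = cb_a };
static struct ftrace_ops ops_b __read_mostly = { .func = cb_b };

static void example(void)
{
        /* assume each ops' filter hash restricts it to a different function */
        register_ftrace_function(&ops_a);
        register_ftrace_function(&ops_b);   /* two ops -> ftrace_ops_list_func() is used */

        unregister_ftrace_function(&ops_a);
        /*
         * Only ops_b remains, so __ftrace_trace_function is pointed at cb_b
         * directly.  On archs without CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
         * this happens before ops_a's call sites are patched out, so cb_b is
         * briefly invoked for ops_a's functions as well.
         */
}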
This is not an issue for archs with CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST,
because the update of ftrace_trace_function doesn't happen until after all
functions have been updated, and then the mcount caller is updated. But
for those archs that use ftrace_test_stop_func(), the update of
__ftrace_trace_function is immediate.
The dynamic selftest fails because it hits this situation, and the
ftrace_ops that it registers fails to trace only what it was supposed to
and instead traces all other functions.
The solution is to delay the setting of __ftrace_trace_function until
after all the functions have been updated according to the registered
ftrace_ops. Also, function_trace_stop is set during the update to keep
the function tracer callbacks from being called while the code is being
modified.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r-- | kernel/trace/ftrace.c | 26 |
1 files changed, 26 insertions, 0 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a0dc0de8d649..62e26d930535 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -88,6 +88,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly = {
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
@@ -146,9 +147,11 @@ void clear_ftrace_function(void)
 {
         ftrace_trace_function = ftrace_stub;
         __ftrace_trace_function = ftrace_stub;
+        __ftrace_trace_function_delay = ftrace_stub;
         ftrace_pid_function = ftrace_stub;
 }
 
+#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 /*
  * For those archs that do not test ftrace_trace_stop in their
@@ -208,7 +211,12 @@ static void update_ftrace_function(void)
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
         ftrace_trace_function = func;
 #else
+#ifdef CONFIG_DYNAMIC_FTRACE
+        /* do not update till all functions have been modified */
+        __ftrace_trace_function_delay = func;
+#else
         __ftrace_trace_function = func;
+#endif
         ftrace_trace_function = ftrace_test_stop_func;
 #endif
 }
@@ -1607,6 +1615,12 @@ static int __ftrace_modify_code(void *data)
 {
         int *command = data;
 
+        /*
+         * Do not call function tracer while we update the code.
+         * We are in stop machine, no worrying about races.
+         */
+        function_trace_stop++;
+
         if (*command & FTRACE_ENABLE_CALLS)
                 ftrace_replace_code(1);
         else if (*command & FTRACE_DISABLE_CALLS)
@@ -1620,6 +1634,18 @@ static int __ftrace_modify_code(void *data)
         else if (*command & FTRACE_STOP_FUNC_RET)
                 ftrace_disable_ftrace_graph_caller();
 
+#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+        /*
+         * For archs that call ftrace_test_stop_func(), we must
+         * wait till after we update all the function callers
+         * before we update the callback. This keeps different
+         * ops that record different functions from corrupting
+         * each other.
+         */
+        __ftrace_trace_function = __ftrace_trace_function_delay;
+#endif
+        function_trace_stop--;
+
         return 0;
 }
 