author		Frederic Weisbecker <fweisbec@gmail.com>	2009-01-14 16:33:27 -0500
committer	Ingo Molnar <mingo@elte.hu>			2009-01-15 03:47:40 -0500
commit		4a2b8dda3f8705880ec7408135645602d5590f51 (patch)
tree		94e13c979bd20c5aeb67b1cc5aadf87f8a47b25a /kernel/trace/ftrace.c
parent		0ee6b6cf5bdb793b4c68507dd65adf16341aa4ca (diff)
tracing/function-graph-tracer: fix a regression during suspend to disk
Impact: fix a crash on kernel image restore

When the function graph tracer is running while the kernel suspends to disk, racy and dangerous things happen to the tracer.

The current task saves its registers, including the stack pointer, which holds a return address already hooked by the tracer. The task then keeps entering other functions to save the memory image, pushing further hooked return addresses, and finally loses the call depth that matched the return address saved in the old stack (the one captured when the registers were saved).

So on image restore, the code returns to the wrong addresses. There is more: during register restore the task's "current" pointer is overwritten, effectively switching from one task to another. Trying to trace function graphs at these stages would be insane.

This patch makes the function graph tracer listen for power management events and disables its tracing for the current task (the one performing the hibernation work) across suspend/resume to disk, making tracing safe during hibernation.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
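For context on what "pausing" means here: pause_graph_tracing() and unpause_graph_tracing() are per-task helpers declared in include/linux/ftrace.h. At the time of this commit they are, roughly, thin wrappers around an atomic counter in task_struct; the sketch below is paraphrased, not a verbatim copy of the header:

/*
 * Paraphrased sketch of the per-task pause helpers (see
 * include/linux/ftrace.h in this tree for the authoritative versions).
 * The counter lives on task_struct, so pausing affects only the caller.
 */
static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}

Because the counter lives on the task, only the task doing the hibernation work stops having its calls hooked; graph tracing of every other task is unaffected.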
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--	kernel/trace/ftrace.c	27
1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8c1c9c0f4775..7e9a20b69939 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -17,6 +17,7 @@
 #include <linux/clocksource.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
+#include <linux/suspend.h>
 #include <linux/debugfs.h>
 #include <linux/hardirq.h>
 #include <linux/kthread.h>
@@ -1957,6 +1958,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER

 static atomic_t ftrace_graph_active;
+static struct notifier_block ftrace_suspend_notifier;

 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 {
@@ -2035,6 +2037,27 @@ static int start_graph_tracing(void)
 	return ret;
 }

+/*
+ * Hibernation protection.
+ * The state of the current task is too much unstable during
+ * suspend/restore to disk. We want to protect against that.
+ */
+static int
+ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
+							void *unused)
+{
+	switch (state) {
+	case PM_HIBERNATION_PREPARE:
+		pause_graph_tracing();
+		break;
+
+	case PM_POST_HIBERNATION:
+		unpause_graph_tracing();
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 			trace_func_graph_ent_t entryfunc)
 {
@@ -2042,6 +2065,9 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,

 	mutex_lock(&ftrace_sysctl_lock);

+	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
+	register_pm_notifier(&ftrace_suspend_notifier);
+
 	atomic_inc(&ftrace_graph_active);
 	ret = start_graph_tracing();
 	if (ret) {
@@ -2067,6 +2093,7 @@ void unregister_ftrace_graph(void)
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
 	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
+	unregister_pm_notifier(&ftrace_suspend_notifier);

 	mutex_unlock(&ftrace_sysctl_lock);
 }
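As a closing note on where the pause counter is consumed: before the graph tracer rewrites a return address on the stack, the entry hook checks current->tracing_graph_pause and backs off if it is non-zero. The sketch below is simplified and hedged; the name prepare_ftrace_return_sketch is illustrative only, and the exact check lives in the architecture ftrace code of this tree:

/*
 * Simplified sketch (not verbatim kernel code): the return-address hook
 * leaves the stack untouched while the current task has graph tracing
 * paused, e.g. between PM_HIBERNATION_PREPARE and PM_POST_HIBERNATION.
 */
static void prepare_ftrace_return_sketch(unsigned long *parent,
					 unsigned long self_addr)
{
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;	/* keep the original return address */

	/* ... otherwise redirect *parent through the tracer's trampoline ... */
}

Registering the notifier in register_ftrace_graph() and removing it in unregister_ftrace_graph() also means the PM hook exists only while graph tracing is active, so hibernation on a system that never enables the tracer is not affected at all.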