author    Frederic Weisbecker <fweisbec@gmail.com>    2009-01-14 16:33:27 -0500
committer Ingo Molnar <mingo@elte.hu>                 2009-01-21 09:21:30 -0500
commit    00f57f545afa422db3003b0d0b30a30f8de7ecb2 (patch)
tree      02d5fc02d95987015b3051a63e7c481a28be4f47 /kernel
parent    082605de5f82eb692cc90f7fda071cc01bb5ac34 (diff)
tracing/function-graph-tracer: fix a regression while suspend to disk
Impact: fix a crash during kernel image restore

When the function graph tracer is running while suspending to disk, some racy and dangerous things happen to this tracer.

The current task saves its registers, including the stack pointer, which contains a return address hooked by the tracer. But the current task then keeps entering other functions while the memory image is being saved, storing further return addresses and finally losing the old depth that matches the return address saved in the old stack (at the time the registers were saved).

So on image restore, the code returns to wrong addresses. There are other problems too: on restore, the task has its "current" pointer overwritten while the registers are restored, effectively switching from one task to another. It would be insane to try to trace function graphs at these stages.

This patch makes the function graph tracer listen for power events and disable its tracing for the current task (the one that performs the hibernation work) during suspend/resume to disk, making tracing safe across hibernation.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
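To make the failure mode concrete: at function entry the graph tracer swaps the real return address on the stack for a trampoline and records the original on a per-task ret_stack. Below is a rough, simplified sketch of that entry hook; it is reconstructed from memory, and the exact names (prepare_ftrace_return, ftrace_push_return_trace, return_to_handler) and signatures should be treated as assumptions for this kernel version, not as the literal source.

/*
 * Conceptual sketch (assumption, simplified) of the graph-tracer entry
 * hook: the caller's return address is recorded on current->ret_stack
 * and replaced on the stack by the return_to_handler trampoline.  If
 * the hibernation image captures the stack at one call depth and the
 * task keeps calling functions afterwards, the return addresses saved
 * in the image and the restored ret_stack depth no longer match.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old = *parent;	/* real return address on the stack */

	/* only hook the return if the entry was recorded successfully */
	if (ftrace_push_return_trace(old, self_addr) == 0)
		*parent = (unsigned long)&return_to_handler;
}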
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/ftrace.c | 27
1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2f32969c09df..7dcf6e9f2b04 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -17,6 +17,7 @@
 #include <linux/clocksource.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
+#include <linux/suspend.h>
 #include <linux/debugfs.h>
 #include <linux/hardirq.h>
 #include <linux/kthread.h>
@@ -1965,6 +1966,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 static atomic_t ftrace_graph_active;
+static struct notifier_block ftrace_suspend_notifier;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 {
@@ -2043,6 +2045,27 @@ static int start_graph_tracing(void)
 	return ret;
 }
 
+/*
+ * Hibernation protection.
+ * The state of the current task is too much unstable during
+ * suspend/restore to disk. We want to protect against that.
+ */
+static int
+ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
+							void *unused)
+{
+	switch (state) {
+	case PM_HIBERNATION_PREPARE:
+		pause_graph_tracing();
+		break;
+
+	case PM_POST_HIBERNATION:
+		unpause_graph_tracing();
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 			trace_func_graph_ent_t entryfunc)
 {
@@ -2050,6 +2073,9 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 
 	mutex_lock(&ftrace_sysctl_lock);
 
+	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
+	register_pm_notifier(&ftrace_suspend_notifier);
+
 	atomic_inc(&ftrace_graph_active);
 	ret = start_graph_tracing();
 	if (ret) {
@@ -2075,6 +2101,7 @@ void unregister_ftrace_graph(void)
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
 	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
+	unregister_pm_notifier(&ftrace_suspend_notifier);
 
 	mutex_unlock(&ftrace_sysctl_lock);
 }
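For context on the pause_graph_tracing()/unpause_graph_tracing() calls used by the notifier above: they are per-task, which is why pausing from the task that performs the hibernation work is sufficient. The following is a minimal sketch of how these helpers were defined in include/linux/ftrace.h for kernels of this era; it is reconstructed from memory, so treat the exact definitions as an assumption rather than a quote.

/*
 * Sketch (assumption): per-task pause helpers.  They bump a counter on
 * current, and the graph-tracer entry path skips tracing for a task
 * while its counter is non-zero.
 */
static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}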