 kernel/trace/trace.c          | 26 +++++++++++++++++++++++---
 kernel/trace/trace_selftest.c | 38 +++++++++++++++++++++++++++++++++++++-
 2 files changed, 60 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e3dfefe69348..e6fac0ffe6f0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4018,11 +4018,12 @@ trace_printk_seq(struct trace_seq *s)
 	trace_seq_init(s);
 }
 
-void ftrace_dump(void)
+static void __ftrace_dump(bool disable_tracing)
 {
 	static DEFINE_SPINLOCK(ftrace_dump_lock);
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
+	unsigned int old_userobj;
 	static int dump_ran;
 	unsigned long flags;
 	int cnt = 0, cpu;
@@ -4034,14 +4035,17 @@ void ftrace_dump(void)
 
 	dump_ran = 1;
 
-	/* No turning back! */
 	tracing_off();
-	ftrace_kill();
+
+	if (disable_tracing)
+		ftrace_kill();
 
 	for_each_tracing_cpu(cpu) {
 		atomic_inc(&global_trace.data[cpu]->disabled);
 	}
 
+	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
+
 	/* don't look at user memory in panic mode */
 	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
@@ -4086,10 +4090,26 @@ void ftrace_dump(void)
 	else
 		printk(KERN_TRACE "---------------------------------\n");
 
+	/* Re-enable tracing if requested */
+	if (!disable_tracing) {
+		trace_flags |= old_userobj;
+
+		for_each_tracing_cpu(cpu) {
+			atomic_dec(&global_trace.data[cpu]->disabled);
+		}
+		tracing_on();
+	}
+
  out:
 	spin_unlock_irqrestore(&ftrace_dump_lock, flags);
 }
 
+/* By default: disable tracing after the dump */
+void ftrace_dump(void)
+{
+	__ftrace_dump(true);
+}
+
 __init static int tracer_alloc_buffers(void)
 {
 	struct trace_array_cpu *data;
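The trace.c side is a parameterize-and-wrap refactor: the exported ftrace_dump() keeps its one-way semantics (tracing is killed for good, as on an oops), while the new static __ftrace_dump(bool disable_tracing) saves the state it tramples — the TRACE_ITER_SYM_USEROBJ bit in old_userobj and the per-CPU disabled counts — and restores it on the disable_tracing == false path, so a caller gets a live tracer back after the dump. Since C has no default arguments, the one-line ftrace_dump() wrapper stands in for __ftrace_dump(true). Below is a minimal user-space sketch of that save-and-conditionally-restore shape; FLAG_SYM_USEROBJ, __dump() and dump() are hypothetical stand-ins, not the kernel symbols.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for trace_flags / TRACE_ITER_SYM_USEROBJ. */
#define FLAG_SYM_USEROBJ 0x1u
static unsigned int trace_flags = FLAG_SYM_USEROBJ;

static void __dump(bool disable_tracing)
{
	/* Save the bit we are about to clear. */
	unsigned int old_userobj = trace_flags & FLAG_SYM_USEROBJ;

	/* Resolving user-space symbols is unsafe in a panic-style dump. */
	trace_flags &= ~FLAG_SYM_USEROBJ;
	printf("dumping with flags=%#x\n", trace_flags);

	/* Only the recoverable variant hands the state back. */
	if (!disable_tracing)
		trace_flags |= old_userobj;
}

/* Wrapper keeping the original no-argument entry point: one-way dump. */
static void dump(void)
{
	__dump(true);
}

int main(void)
{
	__dump(false);		/* recoverable: the flag comes back */
	printf("after recoverable dump: flags=%#x\n", trace_flags);
	dump();			/* one-way: the flag stays cleared */
	printf("after one-way dump:     flags=%#x\n", trace_flags);
	return 0;
}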
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 38856ba78a92..b56dcf7d3566 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -248,6 +248,28 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/* Maximum number of functions to trace before diagnosing a hang */
+#define GRAPH_MAX_FUNC_TEST	100000000
+
+static void __ftrace_dump(bool disable_tracing);
+static unsigned int graph_hang_thresh;
+
+/* Wrap the real function entry probe to avoid possible hanging */
+static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
+{
+	/* This is harmlessly racy, we want to approximately detect a hang */
+	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
+		ftrace_graph_stop();
+		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
+		if (ftrace_dump_on_oops)
+			__ftrace_dump(false);
+		return 0;
+	}
+
+	return trace_graph_entry(trace);
+}
+
 /*
  * Pretty much the same than for the function tracer from which the selftest
  * has been borrowed.
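The watchdog wraps the real entry probe trace_graph_entry(): every traced function entry bumps graph_hang_thresh, and once the counter passes GRAPH_MAX_FUNC_TEST the graph tracer is stopped and, if ftrace_dump_on_oops is set, the buffers are dumped with __ftrace_dump(false) so tracing stays usable for inspection. The unsynchronized ++graph_hang_thresh is "harmlessly racy" as the comment says: increments lost between CPUs merely shift the effective threshold, which is acceptable for a detector this coarse. (The forward declaration of the static __ftrace_dump() works because trace_selftest.c is built into the same translation unit as trace.c rather than compiled separately.) Here is a scaled-down, stand-alone sketch of the wrap-the-callback watchdog pattern; real_probe, probe_watchdog and MAX_CALLS_BEFORE_HANG are illustrative names, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Scaled-down stand-in for GRAPH_MAX_FUNC_TEST. */
#define MAX_CALLS_BEFORE_HANG 100

static unsigned int hang_thresh;
static bool tracer_stopped;

/* The probe we actually want to run on every function entry. */
static int real_probe(int event)
{
	return event;
}

/* Same signature, but bails out once the probe has fired too often. */
static int probe_watchdog(int event)
{
	if (++hang_thresh > MAX_CALLS_BEFORE_HANG) {
		tracer_stopped = true;	/* plays the ftrace_graph_stop() role */
		return 0;		/* swallow the event */
	}
	return real_probe(event);
}

int main(void)
{
	/* Simulate a run that would never terminate on its own. */
	for (int i = 0; i < 1000 && !tracer_stopped; i++)
		probe_watchdog(i);

	/* Mirrors the selftest's post-run check of graph_hang_thresh. */
	if (hang_thresh > MAX_CALLS_BEFORE_HANG)
		puts("hang detected: stop the tracer and fail the test");
	return 0;
}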
@@ -259,15 +281,29 @@ trace_selftest_startup_function_graph(struct tracer *trace,
 	int ret;
 	unsigned long count;
 
-	ret = tracer_init(trace, tr);
+	/*
+	 * Simulate the init() callback but we attach a watchdog callback
+	 * to detect and recover from possible hangs
+	 */
+	tracing_reset_online_cpus(tr);
+	ret = register_ftrace_graph(&trace_graph_return,
+				    &trace_graph_entry_watchdog);
 	if (ret) {
 		warn_failed_init_tracer(trace, ret);
 		goto out;
 	}
+	tracing_start_cmdline_record();
 
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
 
+	/* Have we just recovered from a hang? */
+	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
+		trace->reset(tr);
+		ret = -1;
+		goto out;
+	}
+
 	tracing_stop();
 
 	/* check the trace buffer */
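Instead of tracer_init(), which would install the stock entry probe, the selftest now open-codes the graph tracer's startup: reset the per-CPU buffers, register trace_graph_return together with the watchdog-wrapped entry handler, and start cmdline recording. After the 100 ms run it checks graph_hang_thresh before anything else; on a detected hang it resets the tracer and fails the test rather than wedging the boot. A hedged sketch of that swap-the-callback registration idea follows; register_graph(), graph_tracer_init() and friends are stand-ins, not the ftrace API.

#include <stdio.h>

typedef int (*entry_fn)(int);

/* Stand-in for the ftrace entry-probe registration slot. */
static entry_fn installed_entry;

static int default_entry(int e)
{
	return e;			/* the stock probe */
}

static int watchdog_entry(int e)
{
	/* would bump a hang counter here, then delegate */
	return default_entry(e);
}

static int register_graph(entry_fn entry)
{
	installed_entry = entry;
	return 0;
}

/* The tracer's normal init() would register the stock probe... */
static int graph_tracer_init(void)
{
	return register_graph(default_entry);
}

int main(void)
{
	/* ...but the selftest registers the wrapped probe instead. */
	if (register_graph(watchdog_entry) != 0)
		return 1;

	printf("installed probe: %s\n",
	       installed_entry == watchdog_entry ? "watchdog" : "stock");
	(void)graph_tracer_init;	/* unused on the selftest path */
	return 0;
}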