path: root/kernel
author     Frederic Weisbecker <fweisbec@gmail.com>  2010-04-18 13:08:41 -0400
committer  Frederic Weisbecker <fweisbec@gmail.com>  2010-04-21 17:11:42 -0400
commit     cecbca96da387428e220e307a9c945e37e2f4d9e (patch)
tree       2edefda983658c19a8f2b38ff951a3046597a4f7 /kernel
parent     b15c7b1cee119999e9eafcd602d24a595e77adac (diff)
tracing: Dump either the oops's cpu source or all cpus buffers
The ftrace_dump_on_oops kernel parameter, sysctl and sysrq let one dump the
buffers of every cpu when an oops or panic happens. That is nice when you have
few cpus, but it may take ages if you have many, and you can miss the real
origin of the problem among all the cpu traces. Sometimes all you need is to
dump the buffer of the cpu that triggered the oops, which is most of the time
our main interest.

This patch modifies ftrace_dump_on_oops to handle this choice.

The ftrace_dump_on_oops kernel parameter, when it comes alone, has the same
behaviour as before. But ftrace_dump_on_oops=orig_cpu will only dump the
buffer of the cpu that oops'ed. Similarly, sysctl kernel.ftrace_dump_on_oops=1
and echo 1 > /proc/sys/kernel/ftrace_dump_on_oops keep their previous
behaviour, but setting 2 switches to cpu origin dump mode.

v2: Fix double setup
v3: Fix spelling issues reported by Randy Dunlap
v4: Also update __ftrace_dump in the selftests

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
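The changes below refer to three dump modes: DUMP_NONE, DUMP_ALL and DUMP_ORIG.
Their definition is added outside kernel/ by this patch and therefore does not
appear in the diffstat below; a minimal sketch of how the enum presumably looks:

    /* sketch only -- the real definition is introduced outside kernel/ */
    enum ftrace_dump_mode {
            DUMP_NONE,      /* 0: do not dump on oops (sysctl value 0) */
            DUMP_ALL,       /* 1: dump the buffers of all CPUs */
            DUMP_ORIG,      /* 2: dump only the buffer of the CPU that oopsed */
    };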
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/trace.c           51
-rw-r--r--  kernel/trace/trace_selftest.c   5
2 files changed, 42 insertions, 14 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index bed83cab6da2..7b516c7ef9a0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -117,9 +117,12 @@ static cpumask_var_t __read_mostly tracing_buffer_mask;
  *
  * It is default off, but you can enable it with either specifying
  * "ftrace_dump_on_oops" in the kernel command line, or setting
- * /proc/sys/kernel/ftrace_dump_on_oops to true.
+ * /proc/sys/kernel/ftrace_dump_on_oops
+ * Set 1 if you want to dump buffers of all CPUs
+ * Set 2 if you want to dump the buffer of the CPU that triggered oops
  */
-int ftrace_dump_on_oops;
+
+enum ftrace_dump_mode ftrace_dump_on_oops;
 
 static int tracing_set_tracer(const char *buf);
 
@@ -139,8 +142,17 @@ __setup("ftrace=", set_cmdline_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
-	ftrace_dump_on_oops = 1;
-	return 1;
+	if (*str++ != '=' || !*str) {
+		ftrace_dump_on_oops = DUMP_ALL;
+		return 1;
+	}
+
+	if (!strcmp("orig_cpu", str)) {
+		ftrace_dump_on_oops = DUMP_ORIG;
+		return 1;
+	}
+
+	return 0;
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
@@ -4338,7 +4350,7 @@ static int trace_panic_handler(struct notifier_block *this,
 			       unsigned long event, void *unused)
 {
 	if (ftrace_dump_on_oops)
-		ftrace_dump();
+		ftrace_dump(ftrace_dump_on_oops);
 	return NOTIFY_OK;
 }
 
@@ -4355,7 +4367,7 @@ static int trace_die_handler(struct notifier_block *self,
 	switch (val) {
 	case DIE_OOPS:
 		if (ftrace_dump_on_oops)
-			ftrace_dump();
+			ftrace_dump(ftrace_dump_on_oops);
 		break;
 	default:
 		break;
@@ -4396,7 +4408,8 @@ trace_printk_seq(struct trace_seq *s)
 	trace_seq_init(s);
 }
 
-static void __ftrace_dump(bool disable_tracing)
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 {
 	static arch_spinlock_t ftrace_dump_lock =
 		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -4429,12 +4442,25 @@ static void __ftrace_dump(bool disable_tracing)
 	/* don't look at user memory in panic mode */
 	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
-	printk(KERN_TRACE "Dumping ftrace buffer:\n");
-
 	/* Simulate the iterator */
 	iter.tr = &global_trace;
 	iter.trace = current_trace;
-	iter.cpu_file = TRACE_PIPE_ALL_CPU;
+
+	switch (oops_dump_mode) {
+	case DUMP_ALL:
+		iter.cpu_file = TRACE_PIPE_ALL_CPU;
+		break;
+	case DUMP_ORIG:
+		iter.cpu_file = raw_smp_processor_id();
+		break;
+	case DUMP_NONE:
+		goto out_enable;
+	default:
+		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
+		iter.cpu_file = TRACE_PIPE_ALL_CPU;
+	}
+
+	printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
 	/*
 	 * We need to stop all tracing on all CPUS to read the
@@ -4473,6 +4499,7 @@ static void __ftrace_dump(bool disable_tracing)
 	else
 		printk(KERN_TRACE "---------------------------------\n");
 
+ out_enable:
 	/* Re-enable tracing if requested */
 	if (!disable_tracing) {
 		trace_flags |= old_userobj;
@@ -4489,9 +4516,9 @@ static void __ftrace_dump(bool disable_tracing)
 }
 
 /* By default: disable tracing after the dump */
-void ftrace_dump(void)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 {
-	__ftrace_dump(true);
+	__ftrace_dump(true, oops_dump_mode);
 }
 
 __init static int tracer_alloc_buffers(void)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 9398034f814a..6a9d36ddfcf2 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -256,7 +256,8 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 /* Maximum number of functions to trace before diagnosing a hang */
 #define GRAPH_MAX_FUNC_TEST	100000000
 
-static void __ftrace_dump(bool disable_tracing);
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
 static unsigned int graph_hang_thresh;
 
 /* Wrap the real function entry probe to avoid possible hanging */
@@ -267,7 +268,7 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
 		ftrace_graph_stop();
 		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
 		if (ftrace_dump_on_oops)
-			__ftrace_dump(false);
+			__ftrace_dump(false, DUMP_ALL);
 		return 0;
 	}
 
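For quick reference, the new behaviour can be exercised as follows (the boot
parameter and sysctl values are taken from the commit message above; "orig_cpu"
is the only extra keyword the updated __setup handler accepts):

    ftrace_dump_on_oops                   boot: dump all CPU buffers on oops (as before)
    ftrace_dump_on_oops=orig_cpu          boot: dump only the oops'ing CPU's buffer

    echo 1 > /proc/sys/kernel/ftrace_dump_on_oops     # runtime: all CPUs
    echo 2 > /proc/sys/kernel/ftrace_dump_on_oops     # runtime: originating CPU only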
273 274