Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace.c           | 51
-rw-r--r--  kernel/trace/trace_selftest.c  |  5
2 files changed, 42 insertions, 14 deletions
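The diffstat is limited to 'kernel/trace', so the definition of enum ftrace_dump_mode that ftrace_dump_on_oops is switched to does not appear below; only its users do. Going purely by the values referenced in the hunks (DUMP_NONE skips the dump, DUMP_ALL dumps every CPU's buffer, DUMP_ORIG dumps only the oops'ing CPU), a minimal sketch of that definition would be the following; the header it actually lives in is outside this diff and the ordering shown is an assumption:

/* Sketch only -- the real definition is outside kernel/trace and not part of
 * this diffstat. Names come from the hunks below; the ordering is assumed so
 * that the 1/2 values in the updated comment map to DUMP_ALL/DUMP_ORIG. */
enum ftrace_dump_mode {
        DUMP_NONE = 0,
        DUMP_ALL  = 1,
        DUMP_ORIG = 2,
};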
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index bed83cab6da2..7b516c7ef9a0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -117,9 +117,12 @@ static cpumask_var_t __read_mostly tracing_buffer_mask;
  *
  * It is default off, but you can enable it with either specifying
  * "ftrace_dump_on_oops" in the kernel command line, or setting
- * /proc/sys/kernel/ftrace_dump_on_oops to true.
+ * /proc/sys/kernel/ftrace_dump_on_oops
+ * Set 1 if you want to dump buffers of all CPUs
+ * Set 2 if you want to dump the buffer of the CPU that triggered oops
  */
-int ftrace_dump_on_oops;
+
+enum ftrace_dump_mode ftrace_dump_on_oops;
 
 static int tracing_set_tracer(const char *buf);
 
@@ -139,8 +142,17 @@ __setup("ftrace=", set_cmdline_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
-        ftrace_dump_on_oops = 1;
-        return 1;
+        if (*str++ != '=' || !*str) {
+                ftrace_dump_on_oops = DUMP_ALL;
+                return 1;
+        }
+
+        if (!strcmp("orig_cpu", str)) {
+                ftrace_dump_on_oops = DUMP_ORIG;
+                return 1;
+        }
+
+        return 0;
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
@@ -4338,7 +4350,7 @@ static int trace_panic_handler(struct notifier_block *this,
                                unsigned long event, void *unused)
 {
         if (ftrace_dump_on_oops)
-                ftrace_dump();
+                ftrace_dump(ftrace_dump_on_oops);
         return NOTIFY_OK;
 }
 
@@ -4355,7 +4367,7 @@ static int trace_die_handler(struct notifier_block *self,
         switch (val) {
         case DIE_OOPS:
                 if (ftrace_dump_on_oops)
-                        ftrace_dump();
+                        ftrace_dump(ftrace_dump_on_oops);
                 break;
         default:
                 break;
@@ -4396,7 +4408,8 @@ trace_printk_seq(struct trace_seq *s)
         trace_seq_init(s);
 }
 
-static void __ftrace_dump(bool disable_tracing)
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 {
         static arch_spinlock_t ftrace_dump_lock =
                 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -4429,12 +4442,25 @@ static void __ftrace_dump(bool disable_tracing)
         /* don't look at user memory in panic mode */
         trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
-        printk(KERN_TRACE "Dumping ftrace buffer:\n");
-
         /* Simulate the iterator */
         iter.tr = &global_trace;
         iter.trace = current_trace;
-        iter.cpu_file = TRACE_PIPE_ALL_CPU;
+
+        switch (oops_dump_mode) {
+        case DUMP_ALL:
+                iter.cpu_file = TRACE_PIPE_ALL_CPU;
+                break;
+        case DUMP_ORIG:
+                iter.cpu_file = raw_smp_processor_id();
+                break;
+        case DUMP_NONE:
+                goto out_enable;
+        default:
+                printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
+                iter.cpu_file = TRACE_PIPE_ALL_CPU;
+        }
+
+        printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
         /*
          * We need to stop all tracing on all CPUS to read the
@@ -4473,6 +4499,7 @@ static void __ftrace_dump(bool disable_tracing)
         else
                 printk(KERN_TRACE "---------------------------------\n");
 
+ out_enable:
         /* Re-enable tracing if requested */
         if (!disable_tracing) {
                 trace_flags |= old_userobj;
@@ -4489,9 +4516,9 @@ static void __ftrace_dump(bool disable_tracing)
 }
 
 /* By default: disable tracing after the dump */
-void ftrace_dump(void)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 {
-        __ftrace_dump(true);
+        __ftrace_dump(true, oops_dump_mode);
 }
 
 __init static int tracer_alloc_buffers(void)
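With the reworked __setup handler above, the bare boot parameter ftrace_dump_on_oops selects DUMP_ALL (the pre-patch behaviour), while ftrace_dump_on_oops=orig_cpu selects DUMP_ORIG; any other value makes the handler return 0 and the option is rejected. In-kernel callers now have to pass the mode explicitly, since ftrace_dump() no longer takes void. A minimal sketch of such a caller, using a hypothetical debug helper that is not part of this patch:

/* Hypothetical helper, not part of this patch: dump only the buffer of the
 * CPU this code is running on, mirroring the "orig_cpu" boot option. */
static void my_debug_dump(void)
{
        ftrace_dump(DUMP_ORIG);
}

Passing DUMP_ALL instead reproduces the old all-CPU dump, while DUMP_NONE makes __ftrace_dump() bail out through the new out_enable label without printing anything.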
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 9398034f814a..6a9d36ddfcf2 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -256,7 +256,8 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 /* Maximum number of functions to trace before diagnosing a hang */
 #define GRAPH_MAX_FUNC_TEST        100000000
 
-static void __ftrace_dump(bool disable_tracing);
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
 static unsigned int graph_hang_thresh;
 
 /* Wrap the real function entry probe to avoid possible hanging */
@@ -267,7 +268,7 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
                 ftrace_graph_stop();
                 printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
                 if (ftrace_dump_on_oops)
-                        __ftrace_dump(false);
+                        __ftrace_dump(false, DUMP_ALL);
                 return 0;
         }
 
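The selftest watchdog keeps dumping all CPUs (DUMP_ALL) rather than forwarding ftrace_dump_on_oops, presumably because a function-graph hang is not tied to a single oops'ing CPU. Note also that the runtime knob mentioned in the updated trace.c comment, /proc/sys/kernel/ftrace_dump_on_oops, keeps working as a plain integer only if the enumerators are ordered as in the sketch near the top (DUMP_ALL == 1, DUMP_ORIG == 2); that ordering is an assumption here, since the enum definition is outside this diff.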