about summary refs log tree commit diff stats
path: root/kernel/trace/trace.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  99
1 file changed, 70 insertions(+), 29 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 60f3b6289731..756d7283318b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -117,9 +117,12 @@ static cpumask_var_t __read_mostly tracing_buffer_mask;
117 * 117 *
118 * It is default off, but you can enable it with either specifying 118 * It is default off, but you can enable it with either specifying
119 * "ftrace_dump_on_oops" in the kernel command line, or setting 119 * "ftrace_dump_on_oops" in the kernel command line, or setting
120 * /proc/sys/kernel/ftrace_dump_on_oops to true. 120 * /proc/sys/kernel/ftrace_dump_on_oops
121 * Set 1 if you want to dump buffers of all CPUs
122 * Set 2 if you want to dump the buffer of the CPU that triggered oops
121 */ 123 */
122int ftrace_dump_on_oops; 124
125enum ftrace_dump_mode ftrace_dump_on_oops;
123 126
124static int tracing_set_tracer(const char *buf); 127static int tracing_set_tracer(const char *buf);
125 128
@@ -139,8 +142,17 @@ __setup("ftrace=", set_cmdline_ftrace);
139 142
140static int __init set_ftrace_dump_on_oops(char *str) 143static int __init set_ftrace_dump_on_oops(char *str)
141{ 144{
142 ftrace_dump_on_oops = 1; 145 if (*str++ != '=' || !*str) {
143 return 1; 146 ftrace_dump_on_oops = DUMP_ALL;
147 return 1;
148 }
149
150 if (!strcmp("orig_cpu", str)) {
151 ftrace_dump_on_oops = DUMP_ORIG;
152 return 1;
153 }
154
155 return 0;
144} 156}
145__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); 157__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
146 158
@@ -1571,7 +1583,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
1571{ 1583{
1572 struct ring_buffer *buffer = iter->tr->buffer; 1584 struct ring_buffer *buffer = iter->tr->buffer;
1573 struct trace_entry *ent, *next = NULL; 1585 struct trace_entry *ent, *next = NULL;
1574 unsigned long lost_events, next_lost = 0; 1586 unsigned long lost_events = 0, next_lost = 0;
1575 int cpu_file = iter->cpu_file; 1587 int cpu_file = iter->cpu_file;
1576 u64 next_ts = 0, ts; 1588 u64 next_ts = 0, ts;
1577 int next_cpu = -1; 1589 int next_cpu = -1;
@@ -1796,7 +1808,7 @@ static void print_func_help_header(struct seq_file *m)
1796} 1808}
1797 1809
1798 1810
1799static void 1811void
1800print_trace_header(struct seq_file *m, struct trace_iterator *iter) 1812print_trace_header(struct seq_file *m, struct trace_iterator *iter)
1801{ 1813{
1802 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 1814 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
@@ -2005,7 +2017,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2005 return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED; 2017 return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
2006} 2018}
2007 2019
2008static int trace_empty(struct trace_iterator *iter) 2020int trace_empty(struct trace_iterator *iter)
2009{ 2021{
2010 int cpu; 2022 int cpu;
2011 2023
@@ -2072,6 +2084,23 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
2072 return print_trace_fmt(iter); 2084 return print_trace_fmt(iter);
2073} 2085}
2074 2086
2087void trace_default_header(struct seq_file *m)
2088{
2089 struct trace_iterator *iter = m->private;
2090
2091 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2092 /* print nothing if the buffers are empty */
2093 if (trace_empty(iter))
2094 return;
2095 print_trace_header(m, iter);
2096 if (!(trace_flags & TRACE_ITER_VERBOSE))
2097 print_lat_help_header(m);
2098 } else {
2099 if (!(trace_flags & TRACE_ITER_VERBOSE))
2100 print_func_help_header(m);
2101 }
2102}
2103
2075static int s_show(struct seq_file *m, void *v) 2104static int s_show(struct seq_file *m, void *v)
2076{ 2105{
2077 struct trace_iterator *iter = v; 2106 struct trace_iterator *iter = v;
@@ -2084,17 +2113,9 @@ static int s_show(struct seq_file *m, void *v)
2084 } 2113 }
2085 if (iter->trace && iter->trace->print_header) 2114 if (iter->trace && iter->trace->print_header)
2086 iter->trace->print_header(m); 2115 iter->trace->print_header(m);
2087 else if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 2116 else
2088 /* print nothing if the buffers are empty */ 2117 trace_default_header(m);
2089 if (trace_empty(iter)) 2118
2090 return 0;
2091 print_trace_header(m, iter);
2092 if (!(trace_flags & TRACE_ITER_VERBOSE))
2093 print_lat_help_header(m);
2094 } else {
2095 if (!(trace_flags & TRACE_ITER_VERBOSE))
2096 print_func_help_header(m);
2097 }
2098 } else if (iter->leftover) { 2119 } else if (iter->leftover) {
2099 /* 2120 /*
2100 * If we filled the seq_file buffer earlier, we 2121 * If we filled the seq_file buffer earlier, we
@@ -2180,15 +2201,20 @@ __tracing_open(struct inode *inode, struct file *file)
2180 2201
2181 if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { 2202 if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
2182 for_each_tracing_cpu(cpu) { 2203 for_each_tracing_cpu(cpu) {
2183
2184 iter->buffer_iter[cpu] = 2204 iter->buffer_iter[cpu] =
2185 ring_buffer_read_start(iter->tr->buffer, cpu); 2205 ring_buffer_read_prepare(iter->tr->buffer, cpu);
2206 }
2207 ring_buffer_read_prepare_sync();
2208 for_each_tracing_cpu(cpu) {
2209 ring_buffer_read_start(iter->buffer_iter[cpu]);
2186 tracing_iter_reset(iter, cpu); 2210 tracing_iter_reset(iter, cpu);
2187 } 2211 }
2188 } else { 2212 } else {
2189 cpu = iter->cpu_file; 2213 cpu = iter->cpu_file;
2190 iter->buffer_iter[cpu] = 2214 iter->buffer_iter[cpu] =
2191 ring_buffer_read_start(iter->tr->buffer, cpu); 2215 ring_buffer_read_prepare(iter->tr->buffer, cpu);
2216 ring_buffer_read_prepare_sync();
2217 ring_buffer_read_start(iter->buffer_iter[cpu]);
2192 tracing_iter_reset(iter, cpu); 2218 tracing_iter_reset(iter, cpu);
2193 } 2219 }
2194 2220
@@ -4338,7 +4364,7 @@ static int trace_panic_handler(struct notifier_block *this,
4338 unsigned long event, void *unused) 4364 unsigned long event, void *unused)
4339{ 4365{
4340 if (ftrace_dump_on_oops) 4366 if (ftrace_dump_on_oops)
4341 ftrace_dump(); 4367 ftrace_dump(ftrace_dump_on_oops);
4342 return NOTIFY_OK; 4368 return NOTIFY_OK;
4343} 4369}
4344 4370
@@ -4355,7 +4381,7 @@ static int trace_die_handler(struct notifier_block *self,
4355 switch (val) { 4381 switch (val) {
4356 case DIE_OOPS: 4382 case DIE_OOPS:
4357 if (ftrace_dump_on_oops) 4383 if (ftrace_dump_on_oops)
4358 ftrace_dump(); 4384 ftrace_dump(ftrace_dump_on_oops);
4359 break; 4385 break;
4360 default: 4386 default:
4361 break; 4387 break;
@@ -4396,7 +4422,8 @@ trace_printk_seq(struct trace_seq *s)
4396 trace_seq_init(s); 4422 trace_seq_init(s);
4397} 4423}
4398 4424
4399static void __ftrace_dump(bool disable_tracing) 4425static void
4426__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
4400{ 4427{
4401 static arch_spinlock_t ftrace_dump_lock = 4428 static arch_spinlock_t ftrace_dump_lock =
4402 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 4429 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -4429,12 +4456,25 @@ static void __ftrace_dump(bool disable_tracing)
4429 /* don't look at user memory in panic mode */ 4456 /* don't look at user memory in panic mode */
4430 trace_flags &= ~TRACE_ITER_SYM_USEROBJ; 4457 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
4431 4458
4432 printk(KERN_TRACE "Dumping ftrace buffer:\n");
4433
4434 /* Simulate the iterator */ 4459 /* Simulate the iterator */
4435 iter.tr = &global_trace; 4460 iter.tr = &global_trace;
4436 iter.trace = current_trace; 4461 iter.trace = current_trace;
4437 iter.cpu_file = TRACE_PIPE_ALL_CPU; 4462
4463 switch (oops_dump_mode) {
4464 case DUMP_ALL:
4465 iter.cpu_file = TRACE_PIPE_ALL_CPU;
4466 break;
4467 case DUMP_ORIG:
4468 iter.cpu_file = raw_smp_processor_id();
4469 break;
4470 case DUMP_NONE:
4471 goto out_enable;
4472 default:
4473 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
4474 iter.cpu_file = TRACE_PIPE_ALL_CPU;
4475 }
4476
4477 printk(KERN_TRACE "Dumping ftrace buffer:\n");
4438 4478
4439 /* 4479 /*
4440 * We need to stop all tracing on all CPUS to read the 4480 * We need to stop all tracing on all CPUS to read the
@@ -4473,6 +4513,7 @@ static void __ftrace_dump(bool disable_tracing)
4473 else 4513 else
4474 printk(KERN_TRACE "---------------------------------\n"); 4514 printk(KERN_TRACE "---------------------------------\n");
4475 4515
4516 out_enable:
4476 /* Re-enable tracing if requested */ 4517 /* Re-enable tracing if requested */
4477 if (!disable_tracing) { 4518 if (!disable_tracing) {
4478 trace_flags |= old_userobj; 4519 trace_flags |= old_userobj;
@@ -4489,9 +4530,9 @@ static void __ftrace_dump(bool disable_tracing)
4489} 4530}
4490 4531
4491/* By default: disable tracing after the dump */ 4532/* By default: disable tracing after the dump */
4492void ftrace_dump(void) 4533void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
4493{ 4534{
4494 __ftrace_dump(true); 4535 __ftrace_dump(true, oops_dump_mode);
4495} 4536}
4496 4537
4497__init static int tracer_alloc_buffers(void) 4538__init static int tracer_alloc_buffers(void)