author:    Jason Wessel <jason.wessel@windriver.com>  2010-08-05 10:22:23 -0400
committer: Jason Wessel <jason.wessel@windriver.com>  2010-08-05 10:22:23 -0400
commit:    955b61e597984745fb7d34c75708f6503b6aaeab
tree:      c928ca54a8231b0432e729d8b675e473a5db9104 /kernel
parent:    3f0a55e3579a500ce9f5cdab70a5741f99769118
ftrace,kdb: Extend kdb to be able to dump the ftrace buffer
Add a helper function so that the kdb shell can dump the ftrace
buffer.

Modify trace.c to expose the ability to iterate over the ftrace
buffer in a read-only fashion.
Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
CC: Frederic Weisbecker <fweisbec@gmail.com>
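For illustration, the read-only iteration that trace.c now exposes can be driven roughly as follows. This is a condensed, hypothetical sketch modeled on the ftrace_dump_buf() added in trace_kdb.c below; the function name dump_trace_readonly() is invented for the example, while the helpers it calls (trace_init_global_iter(), tracing_iter_reset(), trace_find_next_entry_inc(), print_trace_line(), trace_printk_seq()) are the ones this patch exports through kernel/trace/trace.h. A real caller would also disable per-cpu tracing around the walk, as trace_kdb.c does.

/* Hypothetical consumer inside kernel/trace; not part of this patch. */
#include <linux/ftrace.h>

#include "trace.h"	/* internal header that now declares the helpers */

static void dump_trace_readonly(void)
{
	/* static: a trace_iterator is too large for the stack */
	static struct trace_iterator iter;
	int cpu;

	trace_init_global_iter(&iter);	/* bind iter to the global trace array */

	/* open a non-consuming reader on every per-cpu ring buffer */
	for_each_tracing_cpu(cpu) {
		iter.buffer_iter[cpu] =
			ring_buffer_read_prepare(iter.tr->buffer, cpu);
		ring_buffer_read_start(iter.buffer_iter[cpu]);
		tracing_iter_reset(&iter, cpu);
	}

	/* walk the merged per-cpu entries without consuming them */
	while (trace_find_next_entry_inc(&iter)) {
		print_trace_line(&iter);	/* format the entry into iter.seq */
		trace_printk_seq(&iter.seq);	/* print and reset the seq buffer */
	}

	/* tear down the per-cpu readers */
	for_each_tracing_cpu(cpu)
		if (iter.buffer_iter[cpu])
			ring_buffer_read_finish(iter.buffer_iter[cpu]);
}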
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/trace/Makefile    |   3
-rw-r--r--   kernel/trace/trace.c     |  43
-rw-r--r--   kernel/trace/trace.h     |  19
-rw-r--r--   kernel/trace/trace_kdb.c | 119
4 files changed, 163 insertions(+), 21 deletions(-)
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index ffb1a5b0550e..4215530b490b 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -57,5 +57,8 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
 obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o
 obj-$(CONFIG_EVENT_TRACING) += power-traces.o
+ifeq ($(CONFIG_TRACING),y)
+obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
+endif
 
 libftrace-y := ftrace.o
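Because the rule is wrapped in ifeq ($(CONFIG_TRACING),y) and keyed off obj-$(CONFIG_KGDB_KDB), trace_kdb.o is built into the kernel only when both the tracing core and the kdb front end are enabled, i.e. with a .config along these lines (illustrative fragment, not part of this patch):

CONFIG_KGDB=y
CONFIG_KGDB_KDB=y
CONFIG_TRACING=y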
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 086d36316805..d6736b93dc2a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -101,10 +101,7 @@ static inline void ftrace_enable_cpu(void)
 	preempt_enable();
 }
 
-static cpumask_var_t __read_mostly tracing_buffer_mask;
-
-#define for_each_tracing_cpu(cpu)	\
-	for_each_cpu(cpu, tracing_buffer_mask)
+cpumask_var_t __read_mostly tracing_buffer_mask;
 
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@ -1539,11 +1536,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 }
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
-enum trace_file_type {
-	TRACE_FILE_LAT_FMT	= 1,
-	TRACE_FILE_ANNOTATE	= 2,
-};
-
 static void trace_iterator_increment(struct trace_iterator *iter)
 {
 	/* Don't allow ftrace to trace into the ring buffers */
@@ -1641,7 +1633,7 @@ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 }
 
 /* Find the next real entry, and increment the iterator to the next entry */
-static void *find_next_entry_inc(struct trace_iterator *iter)
+void *trace_find_next_entry_inc(struct trace_iterator *iter)
 {
 	iter->ent = __find_next_entry(iter, &iter->cpu,
 				      &iter->lost_events, &iter->ts);
@@ -1676,19 +1668,19 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 		return NULL;
 
 	if (iter->idx < 0)
-		ent = find_next_entry_inc(iter);
+		ent = trace_find_next_entry_inc(iter);
 	else
 		ent = iter;
 
 	while (ent && iter->idx < i)
-		ent = find_next_entry_inc(iter);
+		ent = trace_find_next_entry_inc(iter);
 
 	iter->pos = *pos;
 
 	return ent;
 }
 
-static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
+void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 {
 	struct trace_array *tr = iter->tr;
 	struct ring_buffer_event *event;
@@ -2049,7 +2041,7 @@ int trace_empty(struct trace_iterator *iter)
 }
 
 /* Called with trace_event_read_lock() held. */
-static enum print_line_t print_trace_line(struct trace_iterator *iter)
+enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
 	enum print_line_t ret;
 
@@ -3211,7 +3203,7 @@ waitagain:
 
 	trace_event_read_lock();
 	trace_access_lock(iter->cpu_file);
-	while (find_next_entry_inc(iter) != NULL) {
+	while (trace_find_next_entry_inc(iter) != NULL) {
 		enum print_line_t ret;
 		int len = iter->seq.len;
 
@@ -3294,7 +3286,7 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
 		if (ret != TRACE_TYPE_NO_CONSUME)
 			trace_consume(iter);
 		rem -= count;
-		if (!find_next_entry_inc(iter)) {
+		if (!trace_find_next_entry_inc(iter)) {
 			rem = 0;
 			iter->ent = NULL;
 			break;
@@ -3350,7 +3342,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	if (ret <= 0)
 		goto out_err;
 
-	if (!iter->ent && !find_next_entry_inc(iter)) {
+	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
 		ret = -EFAULT;
 		goto out_err;
 	}
@@ -4414,7 +4406,7 @@ static struct notifier_block trace_die_notifier = {
  */
 #define KERN_TRACE KERN_EMERG
 
-static void
+void
 trace_printk_seq(struct trace_seq *s)
 {
 	/* Probably should print a warning here. */
@@ -4429,6 +4421,13 @@ trace_printk_seq(struct trace_seq *s)
 	trace_seq_init(s);
 }
 
+void trace_init_global_iter(struct trace_iterator *iter)
+{
+	iter->tr = &global_trace;
+	iter->trace = current_trace;
+	iter->cpu_file = TRACE_PIPE_ALL_CPU;
+}
+
 static void
 __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 {
@@ -4454,8 +4453,10 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	if (disable_tracing)
 		ftrace_kill();
 
+	trace_init_global_iter(&iter);
+
 	for_each_tracing_cpu(cpu) {
-		atomic_inc(&global_trace.data[cpu]->disabled);
+		atomic_inc(&iter.tr->data[cpu]->disabled);
 	}
 
 	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -4504,7 +4505,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	iter.iter_flags |= TRACE_FILE_LAT_FMT;
 	iter.pos = -1;
 
-	if (find_next_entry_inc(&iter) != NULL) {
+	if (trace_find_next_entry_inc(&iter) != NULL) {
 		int ret;
 
 		ret = print_trace_line(&iter);
@@ -4526,7 +4527,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	trace_flags |= old_userobj;
 
 	for_each_tracing_cpu(cpu) {
-		atomic_dec(&global_trace.data[cpu]->disabled);
+		atomic_dec(&iter.tr->data[cpu]->disabled);
 	}
 	tracing_on();
 }
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 2cd96399463f..0605fc00c176 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -338,6 +338,14 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts);
 
+int trace_empty(struct trace_iterator *iter);
+
+void *trace_find_next_entry_inc(struct trace_iterator *iter);
+
+void trace_init_global_iter(struct trace_iterator *iter);
+
+void tracing_iter_reset(struct trace_iterator *iter, int cpu);
+
 void default_wait_pipe(struct trace_iterator *iter);
 void poll_wait_pipe(struct trace_iterator *iter);
 
@@ -380,6 +388,15 @@ void tracing_start_sched_switch_record(void);
 int register_tracer(struct tracer *type);
 void unregister_tracer(struct tracer *type);
 int is_tracing_stopped(void);
+enum trace_file_type {
+	TRACE_FILE_LAT_FMT	= 1,
+	TRACE_FILE_ANNOTATE	= 2,
+};
+
+extern cpumask_var_t __read_mostly tracing_buffer_mask;
+
+#define for_each_tracing_cpu(cpu)	\
+	for_each_cpu(cpu, tracing_buffer_mask)
 
 extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);
 
@@ -471,6 +488,8 @@ trace_array_vprintk(struct trace_array *tr,
 		    unsigned long ip, const char *fmt, va_list args);
 int trace_array_printk(struct trace_array *tr,
 		       unsigned long ip, const char *fmt, ...);
+void trace_printk_seq(struct trace_seq *s);
+enum print_line_t print_trace_line(struct trace_iterator *iter);
 
 extern unsigned long trace_flags;
 
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
new file mode 100644
index 000000000000..44cbda25b0a5
--- /dev/null
+++ b/kernel/trace/trace_kdb.c
@@ -0,0 +1,119 @@
+/*
+ * kdb helper for dumping the ftrace buffer
+ *
+ * Copyright (C) 2010 Jason Wessel <jason.wessel@windriver.com>
+ *
+ * ftrace_dump_buf based on ftrace_dump:
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
+ *
+ */
+#include <linux/init.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/ftrace.h>
+
+#include "../debug/kdb/kdb_private.h"
+#include "trace.h"
+#include "trace_output.h"
+
+static void ftrace_dump_buf(int skip_lines)
+{
+	/* use static because iter can be a bit big for the stack */
+	static struct trace_iterator iter;
+	unsigned int old_userobj;
+	int cnt = 0, cpu;
+
+	trace_init_global_iter(&iter);
+
+	for_each_tracing_cpu(cpu) {
+		atomic_inc(&iter.tr->data[cpu]->disabled);
+	}
+
+	old_userobj = trace_flags;
+
+	/* don't look at user memory in panic mode */
+	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
+
+	kdb_printf("Dumping ftrace buffer:\n");
+
+	/* reset all but tr, trace, and overruns */
+	memset(&iter.seq, 0,
+		   sizeof(struct trace_iterator) -
+		   offsetof(struct trace_iterator, seq));
+	iter.iter_flags |= TRACE_FILE_LAT_FMT;
+	iter.pos = -1;
+
+	for_each_tracing_cpu(cpu) {
+		iter.buffer_iter[cpu] =
+			ring_buffer_read_prepare(iter.tr->buffer, cpu);
+		ring_buffer_read_start(iter.buffer_iter[cpu]);
+		tracing_iter_reset(&iter, cpu);
+	}
+
+	if (!trace_empty(&iter))
+		trace_find_next_entry_inc(&iter);
+	while (!trace_empty(&iter)) {
+		if (!cnt)
+			kdb_printf("---------------------------------\n");
+		cnt++;
+
+		if (trace_find_next_entry_inc(&iter) != NULL && !skip_lines)
+			print_trace_line(&iter);
+		if (!skip_lines)
+			trace_printk_seq(&iter.seq);
+		else
+			skip_lines--;
+		if (KDB_FLAG(CMD_INTERRUPT))
+			goto out;
+	}
+
+	if (!cnt)
+		kdb_printf(" (ftrace buffer empty)\n");
+	else
+		kdb_printf("---------------------------------\n");
+
+out:
+	trace_flags = old_userobj;
+
+	for_each_tracing_cpu(cpu) {
+		atomic_dec(&iter.tr->data[cpu]->disabled);
+	}
+
+	for_each_tracing_cpu(cpu)
+		if (iter.buffer_iter[cpu])
+			ring_buffer_read_finish(iter.buffer_iter[cpu]);
+}
+
+/*
+ * kdb_ftdump - Dump the ftrace log buffer
+ */
+static int kdb_ftdump(int argc, const char **argv)
+{
+	int skip_lines = 0;
+	char *cp;
+
+	if (argc > 1)
+		return KDB_ARGCOUNT;
+
+	if (argc) {
+		skip_lines = simple_strtol(argv[1], &cp, 0);
+		if (*cp)
+			skip_lines = 0;
+	}
+
+	kdb_trap_printk++;
+	ftrace_dump_buf(skip_lines);
+	kdb_trap_printk--;
+
+	return 0;
+}
+
+static __init int kdb_ftrace_register(void)
+{
+	kdb_register_repeat("ftdump", kdb_ftdump, "", "Dump ftrace log",
+			    0, KDB_REPEAT_NONE);
+	return 0;
+}
+
+late_initcall(kdb_ftrace_register);
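Once the initcall has registered the command, the buffer can be dumped from the kdb shell; the optional argument is the number of output lines to skip. An illustrative session follows (the prompt format and the trace contents depend on the system, so everything between the dashed separators is placeholder text):

[0]kdb> ftdump
Dumping ftrace buffer:
---------------------------------
... entries formatted by print_trace_line() ...
---------------------------------
[0]kdb> ftdump 100

The second invocation skips the first 100 lines of the dump, matching the skip_lines value parsed by kdb_ftdump().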