author		Steven Rostedt <rostedt@goodmis.org>	2008-08-27 23:31:01 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-14 04:36:19 -0400
commit		e5a81b629ea8feb9e7530cfac35cfb41c45facf3 (patch)
tree		71437955371e0913521d4ddb02db974df9fd8d34
parent		b3a320417484a6d6b9d28098944df58341353992 (diff)
ftrace: add stack tracer
This is another tracer using the ftrace infrastructure; it examines
the size of the stack at each function call. If the stack use is
greater than the previously recorded maximum, it is recorded. You can
always see (and set) the max stack size seen; setting it to zero
starts the recording again. The backtrace is also available.

For example:

# cat /debug/tracing/stack_max_size
1856

# cat /debug/tracing/stack_trace
[<c027764d>] stack_trace_call+0x8f/0x101
[<c021b966>] ftrace_call+0x5/0x8
[<c02553cc>] clocksource_get_next+0x12/0x48
[<c02542a5>] update_wall_time+0x538/0x6d1
[<c0245913>] do_timer+0x23/0xb0
[<c0257657>] tick_do_update_jiffies64+0xd9/0xf1
[<c02576b9>] tick_sched_timer+0x4a/0xad
[<c0250fe6>] __run_hrtimer+0x3e/0x75
[<c02518ed>] hrtimer_interrupt+0xf1/0x154
[<c022c870>] smp_apic_timer_interrupt+0x71/0x84
[<c021b7e9>] apic_timer_interrupt+0x2d/0x34
[<c0238597>] finish_task_switch+0x29/0xa0
[<c05abd13>] schedule+0x765/0x7be
[<c05abfca>] schedule_timeout+0x1b/0x90
[<c05ab4d4>] wait_for_common+0xab/0x101
[<c05ab5ac>] wait_for_completion+0x12/0x14
[<c033cfc3>] blk_execute_rq+0x84/0x99
[<c0402470>] scsi_execute+0xc2/0x105
[<c040250a>] scsi_execute_req+0x57/0x7f
[<c043afe0>] sr_test_unit_ready+0x3e/0x97
[<c043bbd6>] sr_media_change+0x43/0x205
[<c046b59f>] media_changed+0x48/0x77
[<c046b5ff>] cdrom_media_changed+0x31/0x37
[<c043b091>] sr_block_media_changed+0x16/0x18
[<c02b9e69>] check_disk_change+0x1b/0x63
[<c046f4c3>] cdrom_open+0x7a1/0x806
[<c043b148>] sr_block_open+0x78/0x8d
[<c02ba4c0>] do_open+0x90/0x257
[<c02ba869>] blkdev_open+0x2d/0x56
[<c0296a1f>] __dentry_open+0x14d/0x23c
[<c0296b32>] nameidata_to_filp+0x24/0x38
[<c02a1c68>] do_filp_open+0x347/0x626
[<c02967ef>] do_sys_open+0x47/0xbc
[<c02968b0>] sys_open+0x23/0x2b
[<c021aadd>] sysenter_do_call+0x12/0x26

I've tested this on both x86_64 and i386.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
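
A note on restarting: per the description above, writing zero clears
the recorded maximum so a new search begins (path assumed to match
the example above):

# echo 0 > /debug/tracing/stack_max_size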
-rw-r--r--	kernel/trace/Kconfig		9
-rw-r--r--	kernel/trace/Makefile		1
-rw-r--r--	kernel/trace/trace_stack.c	254
3 files changed, 264 insertions, 0 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 14d9505178ca..2a22e46390d3 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -106,6 +106,15 @@ config CONTEXT_SWITCH_TRACER
 	  This tracer gets called from the context switch and records
 	  all switching of tasks.
 
+config STACK_TRACER
+	bool "Trace max stack"
+	depends on HAVE_FTRACE
+	select FTRACE
+	select STACKTRACE
+	help
+	  This tracer records the max stack of the kernel, and displays
+	  it in debugfs/tracing/stack_trace
+
 config DYNAMIC_FTRACE
 	bool "enable/disable ftrace tracepoints dynamically"
 	depends on FTRACE
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 71d17de17288..58ec61c44bd6 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_FTRACE) += trace_functions.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
+obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
new file mode 100644
index 000000000000..4d1e522e3fe8
--- /dev/null
+++ b/kernel/trace/trace_stack.c
@@ -0,0 +1,254 @@
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES] =
	 { [0 ... (STACK_TRACE_ENTRIES-1)] = ULONG_MAX };
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static raw_spinlock_t max_stack_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);

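/*
 * Measure the current stack usage. Kernel stacks are THREAD_SIZE
 * aligned, so the low bits of a local variable's address give its
 * offset into the stack area; THREAD_SIZE minus that offset is the
 * amount of stack in use. When a new maximum is seen, record it and
 * save a backtrace of this deepest point under max_stack_lock.
 */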
static inline void check_stack(void)
{
	unsigned long this_size;
	unsigned long flags;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	raw_local_irq_save(flags);
	__raw_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;
	max_stack_trace.skip = 1;

	save_stack_trace(&max_stack_trace);

 out:
	__raw_spin_unlock(&max_stack_lock);
	raw_local_irq_restore(flags);
}

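/*
 * The ftrace callback, run on entry to every traced kernel function.
 * Preemption is disabled so the per-cpu trace_active counter is
 * stable; the counter keeps check_stack() from being re-entered on
 * this CPU by traced functions that check_stack() itself calls.
 */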
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu, resched;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	resched = need_resched();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};

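/*
 * debugfs handlers for stack_max_size: reads report the recorded
 * maximum; a write replaces it, so writing 0 restarts the search
 * for the deepest stack.
 */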
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	char buf[64];
	int ret;

	if (count >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	raw_local_irq_save(flags);
	__raw_spin_lock(&max_stack_lock);
	*ptr = val;
	__raw_spin_unlock(&max_stack_lock);
	raw_local_irq_restore(flags);

	return count;
}

static struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
};

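/*
 * seq_file iterator for the stack_trace file: walks the saved
 * backtrace in stack_dump_trace[] up to the ULONG_MAX terminator,
 * holding max_stack_lock so the trace cannot change while it is
 * being dumped.
 */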
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *t = m->private;

	(*pos)++;

	if (!t || *t == ULONG_MAX)
		return NULL;

	t++;
	m->private = t;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *t = m->private;
	loff_t l = 0;

	local_irq_disable();
	__raw_spin_lock(&max_stack_lock);

	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	__raw_spin_unlock(&max_stack_lock);
	local_irq_enable();
}

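/*
 * Print one saved return address, resolved to a symbol name when
 * kallsyms is available.
 */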
static int trace_lookup_stack(struct seq_file *m, unsigned long addr)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, addr);

	return seq_printf(m, "[<%p>] %s\n", (void*)addr, str);
#else
	return seq_printf(m, "%p\n", (void*)addr);
#endif
}

static int t_show(struct seq_file *m, void *v)
{
	unsigned long *t = v;

	if (!t || *t == ULONG_MAX)
		return 0;

	trace_lookup_stack(m, *t);

	return 0;
}

static struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &stack_trace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = stack_dump_trace;
	}

	return ret;
}

static struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
};

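/*
 * Create the two debugfs files and register the ftrace callback;
 * from then on every traced function call feeds stack_trace_call().
 */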
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("stack_max_size", 0644, d_tracer,
				    &max_stack_size, &stack_max_size_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'stack_max_size' entry\n");

	entry = debugfs_create_file("stack_trace", 0444, d_tracer,
				    NULL, &stack_trace_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'stack_trace' entry\n");

	register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);