Diffstat (limited to 'kernel')
 kernel/trace/Kconfig       |   9 +
 kernel/trace/Makefile      |   1 +
 kernel/trace/trace_stack.c | 254 ++++++++++++++++++++++++++
 3 files changed, 264 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 14d9505178ca..2a22e46390d3 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -106,6 +106,15 @@ config CONTEXT_SWITCH_TRACER
 	  This tracer gets called from the context switch and records
 	  all switching of tasks.
 
+config STACK_TRACER
+	bool "Trace max stack"
+	depends on HAVE_FTRACE
+	select FTRACE
+	select STACKTRACE
+	help
+	  This tracer records the max stack of the kernel, and displays
+	  it in debugfs/tracing/stack_trace
+
 config DYNAMIC_FTRACE
 	bool "enable/disable ftrace tracepoints dynamically"
 	depends on FTRACE
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 71d17de17288..58ec61c44bd6 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_FTRACE) += trace_functions.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
+obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
new file mode 100644
index 000000000000..4d1e522e3fe8
--- /dev/null
+++ b/kernel/trace/trace_stack.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+#include <linux/stacktrace.h>
+#include <linux/kallsyms.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include "trace.h"
+
+#define STACK_TRACE_ENTRIES 500
+
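+/*
+ * Unused slots are filled with ULONG_MAX so the seq_file iterator
+ * below can treat the first ULONG_MAX entry as the end of the trace.
+ */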
+static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES] =
+	{ [0 ... (STACK_TRACE_ENTRIES-1)] = ULONG_MAX };
+static struct stack_trace max_stack_trace = {
+	.max_entries		= STACK_TRACE_ENTRIES,
+	.entries		= stack_dump_trace,
+};
+
+static unsigned long max_stack_size;
+static raw_spinlock_t max_stack_lock =
+	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+static int stack_trace_disabled __read_mostly;
+static DEFINE_PER_CPU(int, trace_active);
+
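+/*
+ * Measure how deep the current stack is: the address of a local
+ * variable, masked by THREAD_SIZE - 1, is the offset into the thread
+ * stack, and THREAD_SIZE minus that offset is the amount in use
+ * (stacks grow down). If it beats the recorded maximum, save a new
+ * backtrace under max_stack_lock.
+ */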
+static inline void check_stack(void)
+{
+	unsigned long this_size;
+	unsigned long flags;
+
+	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
+	this_size = THREAD_SIZE - this_size;
+
+	if (this_size <= max_stack_size)
+		return;
+
+	raw_local_irq_save(flags);
+	__raw_spin_lock(&max_stack_lock);
+
+	/* a race could have already updated it */
+	if (this_size <= max_stack_size)
+		goto out;
+
+	max_stack_size = this_size;
+
+	max_stack_trace.nr_entries = 0;
+	max_stack_trace.skip = 1;
+
+	save_stack_trace(&max_stack_trace);
+
+ out:
+	__raw_spin_unlock(&max_stack_lock);
+	raw_local_irq_restore(flags);
+}
+
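+/*
+ * Called by the function tracer on every traced function entry.
+ * The per-cpu trace_active counter keeps check_stack() from being
+ * entered recursively on the same cpu, and preemption is re-enabled
+ * without rescheduling if a reschedule was already pending, to avoid
+ * recursing into schedule().
+ */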
+static void
+stack_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+	int cpu, resched;
+
+	if (unlikely(!ftrace_enabled || stack_trace_disabled))
+		return;
+
+	resched = need_resched();
+	preempt_disable_notrace();
+
+	cpu = raw_smp_processor_id();
+	/* no atomic needed, we only modify this variable by this cpu */
+	if (per_cpu(trace_active, cpu)++ != 0)
+		goto out;
+
+	check_stack();
+
+ out:
+	per_cpu(trace_active, cpu)--;
+	/* prevent recursion in schedule */
+	if (resched)
+		preempt_enable_no_resched_notrace();
+	else
+		preempt_enable_notrace();
+}
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+	.func = stack_trace_call,
+};
+
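+/*
+ * debugfs handlers for stack_max_size: the file's private_data
+ * points at max_stack_size, so reads format the current maximum
+ * and writes reset it under max_stack_lock.
+ */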
+static ssize_t
+stack_max_size_read(struct file *filp, char __user *ubuf,
+		    size_t count, loff_t *ppos)
+{
+	unsigned long *ptr = filp->private_data;
+	char buf[64];
+	int r;
+
+	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
+	if (r > sizeof(buf))
+		r = sizeof(buf);
+	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
+}
+
+static ssize_t
+stack_max_size_write(struct file *filp, const char __user *ubuf,
+		     size_t count, loff_t *ppos)
+{
+	long *ptr = filp->private_data;
+	unsigned long val, flags;
+	char buf[64];
+	int ret;
+
+	if (count >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, count))
+		return -EFAULT;
+
+	buf[count] = 0;
+
+	ret = strict_strtoul(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	raw_local_irq_save(flags);
+	__raw_spin_lock(&max_stack_lock);
+	*ptr = val;
+	__raw_spin_unlock(&max_stack_lock);
+	raw_local_irq_restore(flags);
+
+	return count;
+}
+
+static struct file_operations stack_max_size_fops = {
+	.open = tracing_open_generic,
+	.read = stack_max_size_read,
+	.write = stack_max_size_write,
+};
+
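+/*
+ * seq_file iterator over stack_dump_trace[]: m->private tracks the
+ * current slot, and iteration ends at the first ULONG_MAX entry.
+ * t_start()/t_stop() hold max_stack_lock so the trace cannot be
+ * replaced mid-walk.
+ */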
+static void *
+t_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	unsigned long *t = m->private;
+
+	(*pos)++;
+
+	if (!t || *t == ULONG_MAX)
+		return NULL;
+
+	t++;
+	m->private = t;
+
+	return t;
+}
+
+static void *t_start(struct seq_file *m, loff_t *pos)
+{
+	unsigned long *t = m->private;
+	loff_t l = 0;
+
+	local_irq_disable();
+	__raw_spin_lock(&max_stack_lock);
+
+	for (; t && l < *pos; t = t_next(m, t, &l))
+		;
+
+	return t;
+}
+
+static void t_stop(struct seq_file *m, void *p)
+{
+	__raw_spin_unlock(&max_stack_lock);
+	local_irq_enable();
+}
+
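+/*
+ * Print one saved return address, resolved to a symbol name when
+ * the kernel is built with CONFIG_KALLSYMS.
+ */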
+static int trace_lookup_stack(struct seq_file *m, unsigned long addr)
+{
+#ifdef CONFIG_KALLSYMS
+	char str[KSYM_SYMBOL_LEN];
+
+	sprint_symbol(str, addr);
+
+	return seq_printf(m, "[<%p>] %s\n", (void*)addr, str);
+#else
+	return seq_printf(m, "%p\n", (void*)addr);
+#endif
+}
+
+static int t_show(struct seq_file *m, void *v)
+{
+	unsigned long *t = v;
+
+	if (!t || *t == ULONG_MAX)
+		return 0;
+
+	trace_lookup_stack(m, *t);
+
+	return 0;
+}
+
+static struct seq_operations stack_trace_seq_ops = {
+	.start		= t_start,
+	.next		= t_next,
+	.stop		= t_stop,
+	.show		= t_show,
+};
+
+static int stack_trace_open(struct inode *inode, struct file *file)
+{
+	int ret;
+
+	ret = seq_open(file, &stack_trace_seq_ops);
+	if (!ret) {
+		struct seq_file *m = file->private_data;
+		m->private = stack_dump_trace;
+	}
+
+	return ret;
+}
+
+static struct file_operations stack_trace_fops = {
+	.open = stack_trace_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+};
+
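+/*
+ * At boot, create the two debugfs files and hook stack_trace_call
+ * into the function tracer.
+ */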
+static __init int stack_trace_init(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
+	d_tracer = tracing_init_dentry();
+
+	entry = debugfs_create_file("stack_max_size", 0644, d_tracer,
+				    &max_stack_size, &stack_max_size_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs 'stack_max_size' entry\n");
+
+	entry = debugfs_create_file("stack_trace", 0444, d_tracer,
+				    NULL, &stack_trace_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs 'stack_trace' entry\n");
+
+	register_ftrace_function(&trace_ops);
+
+	return 0;
+}
+
+device_initcall(stack_trace_init);
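
Once the tracer is built in, both debugfs files read as plain text, so the result is easy to inspect from userspace. A minimal sketch follows; it assumes debugfs is mounted at /sys/kernel/debug (the mount point varies by system, and the Kconfig help above refers to it only as debugfs/tracing):

/* Dump the recorded maximum stack depth and the backtrace that
 * produced it. Adjust the paths to wherever debugfs is mounted.
 */
#include <stdio.h>

static void dump(const char *path)
{
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	dump("/sys/kernel/debug/tracing/stack_max_size");
	dump("/sys/kernel/debug/tracing/stack_trace");
	return 0;
}

Writing a value (typically 0) to stack_max_size resets the recorded maximum, so the next deep call chain is captured afresh.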