Diffstat (limited to 'kernel/trace/trace_stack.c')
-rw-r--r-- | kernel/trace/trace_stack.c | 314
1 file changed, 314 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
new file mode 100644
index 000000000000..be682b62fe58
--- /dev/null
+++ b/kernel/trace/trace_stack.c
@@ -0,0 +1,314 @@
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"

#define STACK_TRACE_ENTRIES 500

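/*
 * stack_dump_trace[] holds the text addresses of the deepest call
 * chain seen so far (terminated by ULONG_MAX), and stack_dump_index[i]
 * holds the stack depth in bytes, measured from the top of the stack,
 * at which entry i was found.
 */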
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
        { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
        .max_entries = STACK_TRACE_ENTRIES,
        .entries = stack_dump_trace,
};

static unsigned long max_stack_size;
static raw_spinlock_t max_stack_lock =
        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);

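/*
 * Measure the current stack usage and, if it exceeds the recorded
 * maximum, save the stack trace and work out the depth of each entry.
 * The slow path runs with IRQs off under max_stack_lock.
 */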
static inline void check_stack(void)
{
        unsigned long this_size, flags;
        unsigned long *p, *top, *start;
        int i;

        this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;
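        /*
         * Worked example (hypothetical address, assuming
         * THREAD_SIZE == 8192): if &this_size is 0xc0341f40, the
         * offset into the thread stack is 0xc0341f40 & 0x1fff = 0x1f40,
         * and since the stack grows down, the space in use is
         * 8192 - 0x1f40 = 192 bytes.
         */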

        if (this_size <= max_stack_size)
                return;

        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(&this_size))
                return;

        raw_local_irq_save(flags);
        __raw_spin_lock(&max_stack_lock);

        /* a race could have already updated it */
        if (this_size <= max_stack_size)
                goto out;

        max_stack_size = this_size;

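        /*
         * Skipping the top three frames presumably omits the tracer's
         * own entries (the ftrace trampoline, stack_trace_call() and
         * check_stack()) so the saved trace starts at the traced
         * function itself.
         */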
        max_stack_trace.nr_entries = 0;
        max_stack_trace.skip = 3;

        save_stack_trace(&max_stack_trace);

        /*
         * Now find where in the stack these entries are.
         */
        i = 0;
        start = &this_size;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

        /*
         * Loop through all the entries. Some of the entries may for
         * some reason not be found on the stack, so we have to account
         * for that. If they are all found, this loop will only run
         * once. This code only runs on a new max, so it is far from
         * a fast path.
         */
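        /*
         * The matching scans the stack words between 'start' and 'top'
         * for each saved return address: when stack_dump_trace[i] is
         * found at address p, the depth of that frame is
         * (top - p) * sizeof(unsigned long) bytes.
         */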
        while (i < max_stack_trace.nr_entries) {

                stack_dump_index[i] = this_size;
                p = start;

                for (; p < top && i < max_stack_trace.nr_entries; p++) {
                        if (*p == stack_dump_trace[i]) {
                                this_size = stack_dump_index[i++] =
                                        (top - p) * sizeof(unsigned long);
                                /* Start the search from here */
                                start = p + 1;
                        }
                }

                i++;
        }

 out:
        __raw_spin_unlock(&max_stack_lock);
        raw_local_irq_restore(flags);
}

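/*
 * The ftrace callback: called on entry to every traced kernel
 * function, so it must stay cheap in the common case and must never
 * recurse back into the tracer.
 */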
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
        int cpu, resched;

        if (unlikely(!ftrace_enabled || stack_trace_disabled))
                return;

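        /*
         * Use the notrace preemption helpers: the plain ones are
         * themselves traced and would recurse into this callback.
         * Remember whether a reschedule was already pending so we do
         * not trigger one from inside the tracer.
         */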
        resched = need_resched();
        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        /* no atomics needed; this variable is only modified on this cpu */
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;

        check_stack();

 out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursing into schedule */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
};

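/*
 * debugfs interface for 'stack_max_size': reading returns the largest
 * stack usage seen so far; writing a value (typically 0) resets the
 * watermark so a new maximum can be recorded.
 */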
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        long *ptr = filp->private_data;
        unsigned long val, flags;
        char buf[64];
        int ret;

        if (count >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, count))
                return -EFAULT;

        buf[count] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        raw_local_irq_save(flags);
        __raw_spin_lock(&max_stack_lock);
        *ptr = val;
        __raw_spin_unlock(&max_stack_lock);
        raw_local_irq_restore(flags);

        return count;
}

static struct file_operations stack_max_size_fops = {
        .open = tracing_open_generic,
        .read = stack_max_size_read,
        .write = stack_max_size_write,
};

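/*
 * seq_file iterator for 'stack_trace': m->private carries the current
 * entry index as a cookie; -1 selects the header row printed by
 * t_show().
 */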
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        long i = (long)m->private;

        (*pos)++;

        i++;

        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return NULL;

        m->private = (void *)i;

        return &m->private;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        void *t = &m->private;
        loff_t l = 0;

        local_irq_disable();
        __raw_spin_lock(&max_stack_lock);

        for (; t && l < *pos; t = t_next(m, t, &l))
                ;

        return t;
}

static void t_stop(struct seq_file *m, void *p)
{
        __raw_spin_unlock(&max_stack_lock);
        local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        sprint_symbol(str, addr);

        return seq_printf(m, "%s\n", str);
#else
        return seq_printf(m, "%p\n", (void*)addr);
#endif
}

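/*
 * Illustrative 'stack_trace' output (hypothetical symbols and sizes):
 *
 *         Depth   Size      Location    (3 entries)
 *         -----   ----      --------
 *   0)      304    80   _raw_spin_lock+0x12/0x60
 *   1)      224   144   schedule+0x35/0x7a0
 *   2)       80    80   worker_thread+0x1c/0x2e0
 */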
static int t_show(struct seq_file *m, void *v)
{
        long i = *(long *)v;
        int size;

        if (i < 0) {
                seq_printf(m, "        Depth   Size      Location"
                           "    (%d entries)\n"
                           "        -----   ----      --------\n",
                           max_stack_trace.nr_entries);
                return 0;
        }

        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

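        /*
         * The size of frame i is its depth minus the depth of the next
         * entry (its caller); the last entry accounts for all the
         * remaining stack usage.
         */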
        if (i+1 == max_stack_trace.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_dump_index[i];
        else
                size = stack_dump_index[i] - stack_dump_index[i+1];

        seq_printf(m, "%3ld) %8d %5d ", i, stack_dump_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}

static struct seq_operations stack_trace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        int ret;

        ret = seq_open(file, &stack_trace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;
                m->private = (void *)-1;
        }

        return ret;
}

static struct file_operations stack_trace_fops = {
        .open = stack_trace_open,
        .read = seq_read,
        .llseek = seq_lseek,
};

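/*
 * Create the 'stack_max_size' and 'stack_trace' files in the tracing
 * debugfs directory (e.g. /sys/kernel/debug/tracing/, depending on
 * where debugfs is mounted) and register the ftrace callback.
 */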
static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("stack_max_size", 0644, d_tracer,
                                    &max_stack_size, &stack_max_size_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'stack_max_size' entry\n");

        entry = debugfs_create_file("stack_trace", 0444, d_tracer,
                                    NULL, &stack_trace_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'stack_trace' entry\n");

        register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);