Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig        | 13
-rw-r--r--  kernel/trace/trace_stack.c  | 52
2 files changed, 57 insertions, 8 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index d8bae6f4219e..e2a4ff6fc3a6 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -244,10 +244,15 @@ config STACK_TRACER
 
 	  This tracer works by hooking into every function call that the
 	  kernel executes, and keeping a maximum stack depth value and
-	  stack-trace saved. Because this logic has to execute in every
-	  kernel function, all the time, this option can slow down the
-	  kernel measurably and is generally intended for kernel
-	  developers only.
+	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
+	  then it will not have any overhead while the stack tracer
+	  is disabled.
+
+	  To enable the stack tracer on bootup, pass in 'stacktrace'
+	  on the kernel command line.
+
+	  The stack tracer can also be enabled or disabled via the
+	  sysctl kernel.stack_tracer_enabled
 
 	  Say N if unsure.
 
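Usage note (not part of the patch itself): with this help text in place, the stack tracer is enabled at boot by adding 'stacktrace' to the kernel command line, or toggled at runtime through the new sysctl; assuming the usual /proc/sys mapping of sysctl names, that means 'sysctl kernel.stack_tracer_enabled=1' or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'. The handler that implements the runtime toggle is added to trace_stack.c below.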
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 0b863f2cbc8e..4842c969c785 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -10,6 +10,7 @@
 #include <linux/debugfs.h>
 #include <linux/ftrace.h>
 #include <linux/module.h>
+#include <linux/sysctl.h>
 #include <linux/init.h>
 #include <linux/fs.h>
 #include "trace.h"
@@ -31,6 +32,10 @@ static raw_spinlock_t max_stack_lock =
 
 static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
+static DEFINE_MUTEX(stack_sysctl_mutex);
+
+int stack_tracer_enabled;
+static int last_stack_tracer_enabled;
 
 static inline void check_stack(void)
 {
@@ -174,7 +179,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	return count;
 }
 
-static struct file_operations stack_max_size_fops = {
+static const struct file_operations stack_max_size_fops = {
 	.open = tracing_open_generic,
 	.read = stack_max_size_read,
 	.write = stack_max_size_write,
@@ -272,7 +277,7 @@ static int t_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static struct seq_operations stack_trace_seq_ops = {
+static const struct seq_operations stack_trace_seq_ops = {
 	.start = t_start,
 	.next = t_next,
 	.stop = t_stop,
@@ -288,12 +293,48 @@ static int stack_trace_open(struct inode *inode, struct file *file)
 	return ret;
 }
 
-static struct file_operations stack_trace_fops = {
+static const struct file_operations stack_trace_fops = {
 	.open = stack_trace_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
 };
 
+int
+stack_trace_sysctl(struct ctl_table *table, int write,
+		   struct file *file, void __user *buffer, size_t *lenp,
+		   loff_t *ppos)
+{
+	int ret;
+
+	mutex_lock(&stack_sysctl_mutex);
+
+	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
+
+	if (ret || !write ||
+	    (last_stack_tracer_enabled == stack_tracer_enabled))
+		goto out;
+
+	last_stack_tracer_enabled = stack_tracer_enabled;
+
+	if (stack_tracer_enabled)
+		register_ftrace_function(&trace_ops);
+	else
+		unregister_ftrace_function(&trace_ops);
+
+ out:
+	mutex_unlock(&stack_sysctl_mutex);
+	return ret;
+}
+
+static int start_stack_trace __initdata;
+
+static __init int enable_stacktrace(char *str)
+{
+	start_stack_trace = 1;
+	return 1;
+}
+__setup("stacktrace", enable_stacktrace);
+
 static __init int stack_trace_init(void)
 {
 	struct dentry *d_tracer;
@@ -311,7 +352,10 @@ static __init int stack_trace_init(void)
 	if (!entry)
 		pr_warning("Could not create debugfs 'stack_trace' entry\n");
 
-	register_ftrace_function(&trace_ops);
+	if (start_stack_trace) {
+		register_ftrace_function(&trace_ops);
+		stack_tracer_enabled = 1;
+	}
 
 	return 0;
 }
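
The diff above adds the stack_trace_sysctl() handler and the stack_tracer_enabled flag, but it does not show where the kernel.stack_tracer_enabled sysctl itself is registered; that hookup normally lives in kernel/sysctl.c. The following is only a hypothetical sketch of such a registration, written against the ctl_table/register_sysctl_table API of this kernel era; the names stack_tracer_table, stack_tracer_root and stack_tracer_sysctl_init() are illustrative and not taken from the patch.

/*
 * Hypothetical sketch only -- this registration is NOT part of the diff
 * above; the real hookup for kernel.stack_tracer_enabled is done in
 * kernel/sysctl.c in a companion change.
 */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/sysctl.h>

extern int stack_tracer_enabled;
extern int stack_trace_sysctl(struct ctl_table *table, int write,
			      struct file *file, void __user *buffer,
			      size_t *lenp, loff_t *ppos);

/* Leaf entry: /proc/sys/kernel/stack_tracer_enabled */
static struct ctl_table stack_tracer_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "stack_tracer_enabled",
		.data		= &stack_tracer_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &stack_trace_sysctl,
	},
	{ }
};

/* Parent directory entry so the leaf appears under "kernel". */
static struct ctl_table stack_tracer_root[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "kernel",
		.mode		= 0555,
		.child		= stack_tracer_table,
	},
	{ }
};

static int __init stack_tracer_sysctl_init(void)
{
	/* Writes to the sysctl then go through stack_trace_sysctl(). */
	if (!register_sysctl_table(stack_tracer_root))
		return -ENOMEM;
	return 0;
}
late_initcall(stack_tracer_sysctl_init);

Using proc_dointvec() inside stack_trace_sysctl() keeps the flag a plain integer, while stack_sysctl_mutex and the comparison against last_stack_tracer_enabled ensure the ftrace callback is registered or unregistered only when the value actually changes.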