author     Steven Rostedt <srostedt@redhat.com>    2008-12-16 23:06:40 -0500
committer  Ingo Molnar <mingo@elte.hu>             2008-12-18 06:56:24 -0500
commit     f38f1d2aa5a3520cf05da7cd6bd12fe2b0c509b7
tree       5eef795a1a081c41686971eaaa5ba2d1098ceb9d /kernel/trace/trace_stack.c
parent     40874491f9e9a4cb08eaf663dbe018bf5671975a
trace: add a way to enable or disable the stack tracer
Impact: enhancement to stack tracer
The stack tracer is currently either on when it is configured in, or off when it is not.
It cannot be disabled when it is configured in, short of disabling the
function tracer that it uses.
This patch adds a way to enable or disable the stack tracer at run time.
It defaults to off at bootup, but a new kernel parameter, 'stacktrace',
has been added to enable it at boot.
A new sysctl, "kernel.stack_tracer_enabled", has been added to let the
user enable or disable the stack tracer at run time.
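
The hook that actually exposes kernel.stack_tracer_enabled lives in
kernel/sysctl.c and is not part of the hunks shown below (this view is
limited to kernel/trace/trace_stack.c). A minimal sketch of what that
ctl_table entry would look like, assuming the CTL_UNNUMBERED style used
by kernels of this vintage:

    /*
     * Sketch only: the kernel/sysctl.c table entry that would wire
     * "kernel.stack_tracer_enabled" to stack_trace_sysctl().  Not part
     * of the trace_stack.c hunks below.
     */
    #ifdef CONFIG_STACK_TRACER
            {
                    .ctl_name       = CTL_UNNUMBERED,
                    .procname       = "stack_tracer_enabled",
                    .data           = &stack_tracer_enabled,
                    .maxlen         = sizeof(int),
                    .mode           = 0644,
                    .proc_handler   = &stack_trace_sysctl,
            },
    #endif

Once such an entry is registered, the tracer can be toggled at run time
with e.g. "sysctl kernel.stack_tracer_enabled=1" or by writing to
/proc/sys/kernel/stack_tracer_enabled, in addition to the 'stacktrace'
boot parameter.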
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_stack.c')
-rw-r--r--  kernel/trace/trace_stack.c | 52
1 file changed, 48 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 0b863f2cbc8e..4842c969c785 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -10,6 +10,7 @@
 #include <linux/debugfs.h>
 #include <linux/ftrace.h>
 #include <linux/module.h>
+#include <linux/sysctl.h>
 #include <linux/init.h>
 #include <linux/fs.h>
 #include "trace.h"
@@ -31,6 +32,10 @@ static raw_spinlock_t max_stack_lock =
 
 static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
+static DEFINE_MUTEX(stack_sysctl_mutex);
+
+int stack_tracer_enabled;
+static int last_stack_tracer_enabled;
 
 static inline void check_stack(void)
 {
@@ -174,7 +179,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
         return count;
 }
 
-static struct file_operations stack_max_size_fops = {
+static const struct file_operations stack_max_size_fops = {
         .open = tracing_open_generic,
         .read = stack_max_size_read,
         .write = stack_max_size_write,
@@ -272,7 +277,7 @@ static int t_show(struct seq_file *m, void *v)
         return 0;
 }
 
-static struct seq_operations stack_trace_seq_ops = {
+static const struct seq_operations stack_trace_seq_ops = {
         .start = t_start,
         .next = t_next,
         .stop = t_stop,
@@ -288,12 +293,48 @@ static int stack_trace_open(struct inode *inode, struct file *file)
         return ret;
 }
 
-static struct file_operations stack_trace_fops = {
+static const struct file_operations stack_trace_fops = {
         .open = stack_trace_open,
         .read = seq_read,
         .llseek = seq_lseek,
 };
 
+int
+stack_trace_sysctl(struct ctl_table *table, int write,
+                   struct file *file, void __user *buffer, size_t *lenp,
+                   loff_t *ppos)
+{
+        int ret;
+
+        mutex_lock(&stack_sysctl_mutex);
+
+        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
+
+        if (ret || !write ||
+            (last_stack_tracer_enabled == stack_tracer_enabled))
+                goto out;
+
+        last_stack_tracer_enabled = stack_tracer_enabled;
+
+        if (stack_tracer_enabled)
+                register_ftrace_function(&trace_ops);
+        else
+                unregister_ftrace_function(&trace_ops);
+
+ out:
+        mutex_unlock(&stack_sysctl_mutex);
+        return ret;
+}
+
+static int start_stack_trace __initdata;
+
+static __init int enable_stacktrace(char *str)
+{
+        start_stack_trace = 1;
+        return 1;
+}
+__setup("stacktrace", enable_stacktrace);
+
 static __init int stack_trace_init(void)
 {
         struct dentry *d_tracer;
@@ -311,7 +352,10 @@ static __init int stack_trace_init(void)
         if (!entry)
                 pr_warning("Could not create debugfs 'stack_trace' entry\n");
 
-        register_ftrace_function(&trace_ops);
+        if (start_stack_trace) {
+                register_ftrace_function(&trace_ops);
+                stack_tracer_enabled = 1;
+        }
 
         return 0;
 }
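
Because stack_tracer_enabled and stack_trace_sysctl() are referenced
from the sysctl code outside this file, the full patch also has to make
them visible through a shared header; that part is not shown here since
this view is limited to kernel/trace/trace_stack.c. A minimal sketch,
assuming the declarations live in include/linux/ftrace.h:

    /*
     * Sketch: declarations the sysctl table needs to see.  Placement in
     * include/linux/ftrace.h is an assumption of this sketch, not shown
     * in the hunks above.
     */
    #include <linux/sysctl.h>

    #ifdef CONFIG_STACK_TRACER
    extern int stack_tracer_enabled;
    int
    stack_trace_sysctl(struct ctl_table *table, int write,
                       struct file *file, void __user *buffer,
                       size_t *lenp, loff_t *ppos);
    #endif

With both pieces in place, trace_ops is registered either at boot, when
'stacktrace' is on the command line, or later through
stack_trace_sysctl() when the sysctl flips from 0 to 1 (and unregistered
when it flips back).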