author     Vaibhav Nagarnaik <vnagarnaik@google.com>    2011-06-13 20:51:57 -0400
committer  Steven Rostedt <rostedt@goodmis.org>         2011-06-14 22:48:37 -0400
commit     4f271a2a60c748599b30bb4dafff30d770439b96 (patch)
tree       4b6aa671e0fbe3e81a423a551ce5aa1032725e2b /kernel/trace
parent     7ea5906405a1f3fc1c0033dfd7e02f2cfd1de5e5 (diff)
tracing: Add a proc file to stop tracing and free buffer
The file entry buffer_size_kb is used to set the size of the tracing buffer. The memory used to expand the buffer is kernel memory. Consider a use case where tracing is handled by a user space utility that acts as a gatekeeper for tracing requests. In an OOM condition, tracing is considered a low-priority task, and if the utility gets killed the ring buffer memory cannot be released back to the kernel.

This patch adds a file called "free_buffer" whose purpose is to stop tracing and free up the ring buffer when it is closed.

The user space process can set the desired size in the buffer_size_kb file and keep a file descriptor open to the "free_buffer" file. If the process gets killed under OOM, the kernel closes the file descriptor, and the release handler stops tracing and releases the kernel memory automatically.

Cc: Ingo Molnar <mingo@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Michael Rubin <mrubin@google.com>
Cc: David Sharp <dhsharp@google.com>
Signed-off-by: Vaibhav Nagarnaik <vnagarnaik@google.com>
Link: http://lkml.kernel.org/r/1308012717-11148-1-git-send-email-vnagarnaik@google.com
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
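A minimal user-space sketch of the gatekeeper pattern described above follows. It is an illustration, not part of the patch: the /sys/kernel/debug/tracing path (the usual debugfs mount point) and the 4096 KB size are assumptions, and error handling is kept to a minimum.

/*
 * Sketch of a tracing gatekeeper: hold "free_buffer" open while the
 * buffer is expanded, so an OOM kill of this process automatically
 * stops tracing and frees the ring buffer via the release handler.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TRACE_DIR "/sys/kernel/debug/tracing"   /* assumed mount point */

int main(void)
{
        const char *size_kb = "4096\n";          /* assumed buffer size in KB */
        int free_fd, size_fd;

        /* Open the safety net first; the kernel closes this fd if we die. */
        free_fd = open(TRACE_DIR "/free_buffer", O_WRONLY);
        if (free_fd < 0) {
                perror("open free_buffer");
                return 1;
        }

        /* Expand the ring buffer only after free_buffer is held open. */
        size_fd = open(TRACE_DIR "/buffer_size_kb", O_WRONLY);
        if (size_fd < 0) {
                perror("open buffer_size_kb");
                close(free_fd);
                return 1;
        }
        if (write(size_fd, size_kb, strlen(size_kb)) < 0)
                perror("write buffer_size_kb");
        close(size_fd);

        /* ... serve tracing requests here ... */
        pause();

        /* Normal shutdown: closing free_buffer also frees the buffer. */
        close(free_fd);
        return 0;
}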
Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/trace.c   108
1 file changed, 73 insertions(+), 35 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 61fda6b6f1ab..9c557ae6a21e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2768,7 +2768,7 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
         return t->init(tr);
 }
 
-static int tracing_resize_ring_buffer(unsigned long size)
+static int __tracing_resize_ring_buffer(unsigned long size)
 {
         int ret;
 
@@ -2820,6 +2820,41 @@ static int tracing_resize_ring_buffer(unsigned long size)
         return ret;
 }
 
+static ssize_t tracing_resize_ring_buffer(unsigned long size)
+{
+        int cpu, ret = size;
+
+        mutex_lock(&trace_types_lock);
+
+        tracing_stop();
+
+        /* disable all cpu buffers */
+        for_each_tracing_cpu(cpu) {
+                if (global_trace.data[cpu])
+                        atomic_inc(&global_trace.data[cpu]->disabled);
+                if (max_tr.data[cpu])
+                        atomic_inc(&max_tr.data[cpu]->disabled);
+        }
+
+        if (size != global_trace.entries)
+                ret = __tracing_resize_ring_buffer(size);
+
+        if (ret < 0)
+                ret = -ENOMEM;
+
+        for_each_tracing_cpu(cpu) {
+                if (global_trace.data[cpu])
+                        atomic_dec(&global_trace.data[cpu]->disabled);
+                if (max_tr.data[cpu])
+                        atomic_dec(&max_tr.data[cpu]->disabled);
+        }
+
+        tracing_start();
+        mutex_unlock(&trace_types_lock);
+
+        return ret;
+}
+
 
 /**
  * tracing_update_buffers - used by tracing facility to expand ring buffers
@@ -2837,7 +2872,7 @@ int tracing_update_buffers(void)
 
         mutex_lock(&trace_types_lock);
         if (!ring_buffer_expanded)
-                ret = tracing_resize_ring_buffer(trace_buf_size);
+                ret = __tracing_resize_ring_buffer(trace_buf_size);
         mutex_unlock(&trace_types_lock);
 
         return ret;
@@ -2861,7 +2896,7 @@ static int tracing_set_tracer(const char *buf)
         mutex_lock(&trace_types_lock);
 
         if (!ring_buffer_expanded) {
-                ret = tracing_resize_ring_buffer(trace_buf_size);
+                ret = __tracing_resize_ring_buffer(trace_buf_size);
                 if (ret < 0)
                         goto out;
                 ret = 0;
@@ -3436,7 +3471,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 {
         unsigned long val;
         char buf[64];
-        int ret, cpu;
+        int ret;
 
         if (cnt >= sizeof(buf))
                 return -EINVAL;
@@ -3454,48 +3489,43 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
         if (!val)
                 return -EINVAL;
 
-        mutex_lock(&trace_types_lock);
-
-        tracing_stop();
-
-        /* disable all cpu buffers */
-        for_each_tracing_cpu(cpu) {
-                if (global_trace.data[cpu])
-                        atomic_inc(&global_trace.data[cpu]->disabled);
-                if (max_tr.data[cpu])
-                        atomic_inc(&max_tr.data[cpu]->disabled);
-        }
-
         /* value is in KB */
         val <<= 10;
 
-        if (val != global_trace.entries) {
-                ret = tracing_resize_ring_buffer(val);
-                if (ret < 0) {
-                        cnt = ret;
-                        goto out;
-                }
-        }
+        ret = tracing_resize_ring_buffer(val);
+        if (ret < 0)
+                return ret;
 
         *ppos += cnt;
 
-        /* If check pages failed, return ENOMEM */
-        if (tracing_disabled)
-                cnt = -ENOMEM;
- out:
-        for_each_tracing_cpu(cpu) {
-                if (global_trace.data[cpu])
-                        atomic_dec(&global_trace.data[cpu]->disabled);
-                if (max_tr.data[cpu])
-                        atomic_dec(&max_tr.data[cpu]->disabled);
-        }
+        return cnt;
+}
 
-        tracing_start();
-        mutex_unlock(&trace_types_lock);
+static ssize_t
+tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
+                          size_t cnt, loff_t *ppos)
+{
+        /*
+         * There is no need to read what the user has written, this function
+         * is just to make sure that there is no error when "echo" is used
+         */
+
+        *ppos += cnt;
 
         return cnt;
 }
 
+static int
+tracing_free_buffer_release(struct inode *inode, struct file *filp)
+{
+        /* disable tracing */
+        tracing_off();
+        /* resize the ring buffer to 0 */
+        tracing_resize_ring_buffer(0);
+
+        return 0;
+}
+
 static int mark_printk(const char *fmt, ...)
 {
         int ret;
@@ -3641,6 +3671,11 @@ static const struct file_operations tracing_entries_fops = {
         .llseek = generic_file_llseek,
 };
 
+static const struct file_operations tracing_free_buffer_fops = {
+        .write = tracing_free_buffer_write,
+        .release = tracing_free_buffer_release,
+};
+
 static const struct file_operations tracing_mark_fops = {
         .open = tracing_open_generic,
         .write = tracing_mark_write,
@@ -4365,6 +4400,9 @@ static __init int tracer_init_debugfs(void)
         trace_create_file("buffer_size_kb", 0644, d_tracer,
                         &global_trace, &tracing_entries_fops);
 
+        trace_create_file("free_buffer", 0644, d_tracer,
+                        &global_trace, &tracing_free_buffer_fops);
+
         trace_create_file("trace_marker", 0220, d_tracer,
                         NULL, &tracing_mark_fops);
 