path: root/kernel/trace/trace.c
author		Vaibhav Nagarnaik <vnagarnaik@google.com>	2012-02-02 15:00:41 -0500
committer	Steven Rostedt <rostedt@goodmis.org>	2012-04-23 21:17:51 -0400
commit		438ced1720b584000a9e8a4349d1f6bb7ee3ad6d
tree		2c769e58411c68b298ab816c577ecb2119c7067c
parent		5a26c8f0cf1e95106858bb4e23ca6dd14c9b842f
ring-buffer: Add per_cpu ring buffer control files
Add a debugfs entry called buffer_size_kb under the per_cpu/ folder for each
CPU, to control the size of that CPU's ring buffer independently.

If the global buffer_size_kb file is used to set the size, all the individual
ring buffers are adjusted to the given size, and buffer_size_kb reports the
common size to maintain backward compatibility. If the buffer_size_kb file
under the per_cpu/ directory is used to change the buffer size for a specific
CPU, only that CPU's ring buffer is resized. In that case, reading
tracing/buffer_size_kb reports 'X' to indicate that the per-CPU ring buffer
sizes are no longer equivalent.

Link: http://lkml.kernel.org/r/1328212844-11889-1-git-send-email-vnagarnaik@google.com

Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Michael Rubin <mrubin@google.com>
Cc: David Sharp <dhsharp@google.com>
Cc: Justin Teravest <teravest@google.com>
Signed-off-by: Vaibhav Nagarnaik <vnagarnaik@google.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
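For illustration (not part of the commit), here is a minimal user-space sketch
of how the new files could be exercised, assuming debugfs is mounted at
/sys/kernel/debug and the kernel carries this patch; the cpu0 path and the
2048 KB value are arbitrary examples:

/*
 * Hypothetical usage sketch: resize cpu0's ring buffer independently,
 * then read the global buffer_size_kb, which reports "X" once the
 * per-cpu sizes diverge.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd;

	/*
	 * Per-CPU control file added by this patch. Note the diff creates
	 * it with mode 0444, so opening it for writing may require root.
	 */
	fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/buffer_size_kb",
		  O_WRONLY);
	if (fd < 0) {
		perror("open per_cpu/cpu0/buffer_size_kb");
		return 1;
	}
	if (write(fd, "2048", 4) < 0)	/* value is in KB */
		perror("resize cpu0 buffer");
	close(fd);

	/* The global file reports "X" when per-cpu sizes are not equal. */
	fd = open("/sys/kernel/debug/tracing/buffer_size_kb", O_RDONLY);
	if (fd < 0) {
		perror("open buffer_size_kb");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("global buffer_size_kb: %s", buf);
	}
	close(fd);
	return 0;
}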
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	190
1 file changed, 152 insertions(+), 38 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index bbcde546f9f7..f11a285ee5bb 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -838,7 +838,8 @@ __acquires(kernel_lock)
 
 	/* If we expanded the buffers, make sure the max is expanded too */
 	if (ring_buffer_expanded && type->use_max_tr)
-		ring_buffer_resize(max_tr.buffer, trace_buf_size);
+		ring_buffer_resize(max_tr.buffer, trace_buf_size,
+						RING_BUFFER_ALL_CPUS);
 
 	/* the test is responsible for initializing and enabling */
 	pr_info("Testing tracer %s: ", type->name);
@@ -854,7 +855,8 @@ __acquires(kernel_lock)
 
 	/* Shrink the max buffer again */
 	if (ring_buffer_expanded && type->use_max_tr)
-		ring_buffer_resize(max_tr.buffer, 1);
+		ring_buffer_resize(max_tr.buffer, 1,
+						RING_BUFFER_ALL_CPUS);
 
 	printk(KERN_CONT "PASSED\n");
 }
@@ -3053,7 +3055,14 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
 	return t->init(tr);
 }
 
-static int __tracing_resize_ring_buffer(unsigned long size)
+static void set_buffer_entries(struct trace_array *tr, unsigned long val)
+{
+	int cpu;
+	for_each_tracing_cpu(cpu)
+		tr->data[cpu]->entries = val;
+}
+
+static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 {
 	int ret;
 
@@ -3064,19 +3073,32 @@ static int __tracing_resize_ring_buffer(unsigned long size)
 	 */
 	ring_buffer_expanded = 1;
 
-	ret = ring_buffer_resize(global_trace.buffer, size);
+	ret = ring_buffer_resize(global_trace.buffer, size, cpu);
 	if (ret < 0)
 		return ret;
 
 	if (!current_trace->use_max_tr)
 		goto out;
 
-	ret = ring_buffer_resize(max_tr.buffer, size);
+	ret = ring_buffer_resize(max_tr.buffer, size, cpu);
 	if (ret < 0) {
-		int r;
+		int r = 0;
+
+		if (cpu == RING_BUFFER_ALL_CPUS) {
+			int i;
+			for_each_tracing_cpu(i) {
+				r = ring_buffer_resize(global_trace.buffer,
+						global_trace.data[i]->entries,
+						i);
+				if (r < 0)
+					break;
+			}
+		} else {
+			r = ring_buffer_resize(global_trace.buffer,
+					global_trace.data[cpu]->entries,
+					cpu);
+		}
 
-		r = ring_buffer_resize(global_trace.buffer,
-				       global_trace.entries);
 		if (r < 0) {
 			/*
 			 * AARGH! We are left with different
@@ -3098,14 +3120,21 @@ static int __tracing_resize_ring_buffer(unsigned long size)
 		return ret;
 	}
 
-	max_tr.entries = size;
+	if (cpu == RING_BUFFER_ALL_CPUS)
+		set_buffer_entries(&max_tr, size);
+	else
+		max_tr.data[cpu]->entries = size;
+
  out:
-	global_trace.entries = size;
+	if (cpu == RING_BUFFER_ALL_CPUS)
+		set_buffer_entries(&global_trace, size);
+	else
+		global_trace.data[cpu]->entries = size;
 
 	return ret;
 }
 
-static ssize_t tracing_resize_ring_buffer(unsigned long size)
+static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
 {
 	int cpu, ret = size;
 
@@ -3121,12 +3150,19 @@ static ssize_t tracing_resize_ring_buffer(unsigned long size)
 		atomic_inc(&max_tr.data[cpu]->disabled);
 	}
 
-	if (size != global_trace.entries)
-		ret = __tracing_resize_ring_buffer(size);
+	if (cpu_id != RING_BUFFER_ALL_CPUS) {
+		/* make sure, this cpu is enabled in the mask */
+		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
 
+	ret = __tracing_resize_ring_buffer(size, cpu_id);
 	if (ret < 0)
 		ret = -ENOMEM;
 
+out:
 	for_each_tracing_cpu(cpu) {
 		if (global_trace.data[cpu])
 			atomic_dec(&global_trace.data[cpu]->disabled);
@@ -3157,7 +3193,8 @@ int tracing_update_buffers(void)
 
 	mutex_lock(&trace_types_lock);
 	if (!ring_buffer_expanded)
-		ret = __tracing_resize_ring_buffer(trace_buf_size);
+		ret = __tracing_resize_ring_buffer(trace_buf_size,
+						RING_BUFFER_ALL_CPUS);
 	mutex_unlock(&trace_types_lock);
 
 	return ret;
@@ -3181,7 +3218,8 @@ static int tracing_set_tracer(const char *buf)
 	mutex_lock(&trace_types_lock);
 
 	if (!ring_buffer_expanded) {
-		ret = __tracing_resize_ring_buffer(trace_buf_size);
+		ret = __tracing_resize_ring_buffer(trace_buf_size,
+						RING_BUFFER_ALL_CPUS);
 		if (ret < 0)
 			goto out;
 		ret = 0;
@@ -3207,8 +3245,8 @@ static int tracing_set_tracer(const char *buf)
 		 * The max_tr ring buffer has some state (e.g. ring->clock) and
 		 * we want preserve it.
 		 */
-		ring_buffer_resize(max_tr.buffer, 1);
-		max_tr.entries = 1;
+		ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
+		set_buffer_entries(&max_tr, 1);
 	}
 	destroy_trace_option_files(topts);
 
@@ -3216,10 +3254,17 @@ static int tracing_set_tracer(const char *buf)
 
 	topts = create_trace_option_files(current_trace);
 	if (current_trace->use_max_tr) {
-		ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
-		if (ret < 0)
-			goto out;
-		max_tr.entries = global_trace.entries;
+		int cpu;
+		/* we need to make per cpu buffer sizes equivalent */
+		for_each_tracing_cpu(cpu) {
+			ret = ring_buffer_resize(max_tr.buffer,
+						global_trace.data[cpu]->entries,
+						cpu);
+			if (ret < 0)
+				goto out;
+			max_tr.data[cpu]->entries =
+					global_trace.data[cpu]->entries;
+		}
 	}
 
 	if (t->init) {
@@ -3721,30 +3766,82 @@ out_err:
 	goto out;
 }
 
+struct ftrace_entries_info {
+	struct trace_array	*tr;
+	int			cpu;
+};
+
+static int tracing_entries_open(struct inode *inode, struct file *filp)
+{
+	struct ftrace_entries_info *info;
+
+	if (tracing_disabled)
+		return -ENODEV;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->tr = &global_trace;
+	info->cpu = (unsigned long)inode->i_private;
+
+	filp->private_data = info;
+
+	return 0;
+}
+
 static ssize_t
 tracing_entries_read(struct file *filp, char __user *ubuf,
 		     size_t cnt, loff_t *ppos)
 {
-	struct trace_array *tr = filp->private_data;
-	char buf[96];
-	int r;
+	struct ftrace_entries_info *info = filp->private_data;
+	struct trace_array *tr = info->tr;
+	char buf[64];
+	int r = 0;
+	ssize_t ret;
 
 	mutex_lock(&trace_types_lock);
-	if (!ring_buffer_expanded)
-		r = sprintf(buf, "%lu (expanded: %lu)\n",
-			    tr->entries >> 10,
-			    trace_buf_size >> 10);
-	else
-		r = sprintf(buf, "%lu\n", tr->entries >> 10);
+
+	if (info->cpu == RING_BUFFER_ALL_CPUS) {
+		int cpu, buf_size_same;
+		unsigned long size;
+
+		size = 0;
+		buf_size_same = 1;
+		/* check if all cpu sizes are same */
+		for_each_tracing_cpu(cpu) {
+			/* fill in the size from first enabled cpu */
+			if (size == 0)
+				size = tr->data[cpu]->entries;
+			if (size != tr->data[cpu]->entries) {
+				buf_size_same = 0;
+				break;
+			}
+		}
+
+		if (buf_size_same) {
+			if (!ring_buffer_expanded)
+				r = sprintf(buf, "%lu (expanded: %lu)\n",
+					    size >> 10,
+					    trace_buf_size >> 10);
+			else
+				r = sprintf(buf, "%lu\n", size >> 10);
+		} else
+			r = sprintf(buf, "X\n");
+	} else
+		r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
+
 	mutex_unlock(&trace_types_lock);
 
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+	return ret;
 }
 
 static ssize_t
 tracing_entries_write(struct file *filp, const char __user *ubuf,
 		      size_t cnt, loff_t *ppos)
 {
+	struct ftrace_entries_info *info = filp->private_data;
 	unsigned long val;
 	int ret;
 
@@ -3759,7 +3856,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	/* value is in KB */
 	val <<= 10;
 
-	ret = tracing_resize_ring_buffer(val);
+	ret = tracing_resize_ring_buffer(val, info->cpu);
 	if (ret < 0)
 		return ret;
 
@@ -3768,6 +3865,16 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int
+tracing_entries_release(struct inode *inode, struct file *filp)
+{
+	struct ftrace_entries_info *info = filp->private_data;
+
+	kfree(info);
+
+	return 0;
+}
+
 static ssize_t
 tracing_total_entries_read(struct file *filp, char __user *ubuf,
 			   size_t cnt, loff_t *ppos)
@@ -3779,7 +3886,7 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
 
 	mutex_lock(&trace_types_lock);
 	for_each_tracing_cpu(cpu) {
-		size += tr->entries >> 10;
+		size += tr->data[cpu]->entries >> 10;
 		if (!ring_buffer_expanded)
 			expanded_size += trace_buf_size >> 10;
 	}
@@ -3813,7 +3920,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
 	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
 		tracing_off();
 	/* resize the ring buffer to 0 */
-	tracing_resize_ring_buffer(0);
+	tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
 
 	return 0;
 }
@@ -4012,9 +4119,10 @@ static const struct file_operations tracing_pipe_fops = {
 };
 
 static const struct file_operations tracing_entries_fops = {
-	.open		= tracing_open_generic,
+	.open		= tracing_entries_open,
 	.read		= tracing_entries_read,
 	.write		= tracing_entries_write,
+	.release	= tracing_entries_release,
 	.llseek		= generic_file_llseek,
 };
 
@@ -4466,6 +4574,9 @@ static void tracing_init_debugfs_percpu(long cpu)
 
 	trace_create_file("stats", 0444, d_cpu,
 			(void *) cpu, &tracing_stats_fops);
+
+	trace_create_file("buffer_size_kb", 0444, d_cpu,
+			(void *) cpu, &tracing_entries_fops);
 }
 
 #ifdef CONFIG_FTRACE_SELFTEST
@@ -4795,7 +4906,7 @@ static __init int tracer_init_debugfs(void)
 			(void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
 
 	trace_create_file("buffer_size_kb", 0644, d_tracer,
-			&global_trace, &tracing_entries_fops);
+			(void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
 
 	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
 			&global_trace, &tracing_total_entries_fops);
@@ -5056,7 +5167,6 @@ __init static int tracer_alloc_buffers(void)
 		WARN_ON(1);
 		goto out_free_cpumask;
 	}
-	global_trace.entries = ring_buffer_size(global_trace.buffer);
 	if (global_trace.buffer_disabled)
 		tracing_off();
 
@@ -5069,7 +5179,6 @@ __init static int tracer_alloc_buffers(void)
 		ring_buffer_free(global_trace.buffer);
 		goto out_free_cpumask;
 	}
-	max_tr.entries = 1;
 #endif
 
 	/* Allocate the first page for all buffers */
@@ -5078,6 +5187,11 @@ __init static int tracer_alloc_buffers(void)
 		max_tr.data[i] = &per_cpu(max_tr_data, i);
 	}
 
+	set_buffer_entries(&global_trace, ring_buf_size);
+#ifdef CONFIG_TRACER_MAX_TRACE
+	set_buffer_entries(&max_tr, 1);
+#endif
+
 	trace_init_cmdlines();
 
 	register_tracer(&nop_trace);